// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_err.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

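/*
 * Illustrative use of the two macros above (not a new API): read one
 * 64-bit counter out of the MAC statistics block by its byte offset.
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * g_mac_stats_string[] below is consumed exactly this way: each entry
 * pairs an ethtool string with the offsetof() of its counter field.
 */
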
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

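/*
 * A rough bound, assuming HCLGE_PF_RESET_SYNC_TIME is in milliseconds
 * (an assumption; the unit is not visible in this excerpt): a PF reset
 * sync loop retrying HCLGE_PF_RESET_SYNC_CNT times waits at most
 * 1500 * 20 ms = 30 seconds before giving up.
 */
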
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, },
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {
	HCLGE_CMDQ_TX_ADDR_L_REG,
	HCLGE_CMDQ_TX_ADDR_H_REG,
	HCLGE_CMDQ_TX_DEPTH_REG,
	HCLGE_CMDQ_TX_TAIL_REG,
	HCLGE_CMDQ_TX_HEAD_REG,
	HCLGE_CMDQ_RX_ADDR_L_REG,
	HCLGE_CMDQ_RX_ADDR_H_REG,
	HCLGE_CMDQ_RX_DEPTH_REG,
	HCLGE_CMDQ_RX_TAIL_REG,
	HCLGE_CMDQ_RX_HEAD_REG,
	HCLGE_VECTOR0_CMDQ_SRC_REG,
	HCLGE_CMDQ_INTR_STS_REG,
	HCLGE_CMDQ_INTR_EN_REG,
	HCLGE_CMDQ_INTR_GEN_REG
};

static const u32 common_reg_addr_list[] = {
	HCLGE_MISC_VECTOR_REG_BASE,
	HCLGE_VECTOR0_OTER_EN_REG,
	HCLGE_MISC_RESET_STS_REG,
	HCLGE_MISC_VECTOR_INT_STS,
	HCLGE_GLOBAL_RESET_REG,

static const u32 ring_reg_addr_list[] = {
	HCLGE_RING_RX_ADDR_L_REG,
	HCLGE_RING_RX_ADDR_H_REG,
	HCLGE_RING_RX_BD_NUM_REG,
	HCLGE_RING_RX_BD_LENGTH_REG,
	HCLGE_RING_RX_MERGE_EN_REG,
	HCLGE_RING_RX_TAIL_REG,
	HCLGE_RING_RX_HEAD_REG,
	HCLGE_RING_RX_FBD_NUM_REG,
	HCLGE_RING_RX_OFFSET_REG,
	HCLGE_RING_RX_FBD_OFFSET_REG,
	HCLGE_RING_RX_STASH_REG,
	HCLGE_RING_RX_BD_ERR_REG,
	HCLGE_RING_TX_ADDR_L_REG,
	HCLGE_RING_TX_ADDR_H_REG,
	HCLGE_RING_TX_BD_NUM_REG,
	HCLGE_RING_TX_PRIORITY_REG,
	HCLGE_RING_TX_TC_REG,
	HCLGE_RING_TX_MERGE_EN_REG,
	HCLGE_RING_TX_TAIL_REG,
	HCLGE_RING_TX_HEAD_REG,
	HCLGE_RING_TX_FBD_NUM_REG,
	HCLGE_RING_TX_OFFSET_REG,
	HCLGE_RING_TX_EBD_NUM_REG,
	HCLGE_RING_TX_EBD_OFFSET_REG,
	HCLGE_RING_TX_BD_ERR_REG,

static const u32 tqp_intr_reg_addr_list[] = {
	HCLGE_TQP_INTR_CTRL_REG,
	HCLGE_TQP_INTR_GL0_REG,
	HCLGE_TQP_INTR_GL1_REG,
	HCLGE_TQP_INTR_GL2_REG,
	HCLGE_TQP_INTR_RL_REG
};

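/*
 * These register-address tables (cmdq, common, ring and TQP interrupt
 * registers above) group the hardware registers by block; in this driver
 * family they back bulk register dumps (e.g. ethtool register queries).
 * That use is inferred from the names here, not shown in this excerpt.
 */
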
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

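/*
 * The two halves above encode the destination MAC 01:80:c2:00:00:0e,
 * the IEEE 802.1AB nearest-bridge multicast address, so this entry
 * matches LLDP frames both by Ethertype (ETH_P_LLDP) and by address.
 */
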
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

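/*
 * This 40-byte sequence (6d 5a 56 da 25 5b ...) matches the well-known
 * default Toeplitz RSS hash key from Microsoft's RSS documentation,
 * which many NIC drivers reuse as their default key.
 */
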
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

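/*
 * Worked example for the descriptor-count formula in
 * hclge_mac_query_reg_num() above: the "- 3" and ">> 2" imply the first
 * descriptor carries three register values and each following descriptor
 * carries four. For reg_num = 10: desc_num = 1 + ((10 - 3) >> 2) + 1 = 3
 * descriptors (3 + 4 + 3 registers); the final term rounds up the
 * remainder when (reg_num - 3) is not a multiple of four.
 */
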
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Mark the PF as the main PF */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	do {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS,
					   true);
		req = (struct hclge_func_status_cmd *)desc.data;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check if PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and RoCE vectors;
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

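/*
 * Resulting MSI-X vector layout when RoCE is supported (a sketch based
 * on the assignment above): vectors [0, roce_base_msix_offset) serve the
 * NIC and vectors [roce_base_msix_offset, num_msi) serve RoCE, hence
 * num_msi = roce_base_msix_offset + num_roce_msi.
 */
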
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

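	/*
	 * The double shift above is equivalent to "<< 32": param[2] holds
	 * the low 32 bits of the MAC address and the field from param[3]
	 * supplies the remaining high bits of the 48-bit address. Splitting
	 * the shift is presumably defensive style against a shift-by-32 on
	 * a 32-bit operand (undefined in C); since mac_addr_tmp_high is u64
	 * here, either form is well defined.
	 */
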
	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimum number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are not supported for now */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the initial affinity based on the PCI function number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

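/*
 * Example with made-up numbers: alloc_tqps = 16, num_tc = 4 and
 * rss_size_max = 8 give rss_size = min(8, 16 / 4) = 4 RSS queues per TC.
 */
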
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

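/*
 * Reading the sizing above: shared_buf_tc reserves one aligned MPS per
 * TC plus one extra, i.e. (tc_num + 1) * aligned_mps, and shared_std is
 * the larger of that and the DCB/non-DCB minimum, rounded up to the
 * 256-byte buffer unit before being checked against the available space.
 */
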
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

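/*
 * Worked example with assumed values (for illustration only): if
 * mps = 1536 (half_mps = 768) and dv_buf_size = 8192, then
 * min_rx_priv = 8192 + 0x3C00 (15360) + 5 * 768 = 27392 bytes, already a
 * multiple of the 256-byte unit; private-only allocation is used only
 * when every enabled TC can get at least that much.
 */
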
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

2048 struct hclge_pkt_buf_alloc *buf_alloc)
2050 struct hclge_rx_priv_buff_cmd *req;
2051 struct hclge_desc desc;
2055 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2056 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2058 /* Alloc private buffer TCs */
2059 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2060 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2063 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2065 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2069 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2070 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2072 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2074 dev_err(&hdev->pdev->dev,
2075 "rx private buffer alloc cmd failed %d\n", ret);
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

2192 int hclge_buffer_alloc(struct hclge_dev *hdev)
2194 struct hclge_pkt_buf_alloc *pkt_buf;
2197 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2201 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2203 dev_err(&hdev->pdev->dev,
2204 "could not calc tx buffer size for all TCs %d\n", ret);
2208 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2210 dev_err(&hdev->pdev->dev,
2211 "could not alloc tx buffers %d\n", ret);
2215 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2217 dev_err(&hdev->pdev->dev,
2218 "could not calc rx priv buffer size for all TCs %d\n",
2223 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2225 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2230 if (hnae3_dev_dcb_supported(hdev)) {
2231 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2233 dev_err(&hdev->pdev->dev,
2234 "could not configure rx private waterline %d\n",
2239 ret = hclge_common_thrd_config(hdev, pkt_buf);
2241 dev_err(&hdev->pdev->dev,
2242 "could not configure common threshold %d\n",
2248 ret = hclge_common_wl_config(hdev, pkt_buf);
2250 dev_err(&hdev->pdev->dev,
2251 "could not configure common waterline %d\n", ret);
2258 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2260 struct hnae3_handle *roce = &vport->roce;
2261 struct hnae3_handle *nic = &vport->nic;
2263 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2265 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2266 vport->back->num_msi_left == 0)
2269 roce->rinfo.base_vector = vport->back->roce_base_vector;
2271 roce->rinfo.netdev = nic->kinfo.netdev;
2272 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2274 roce->pdev = nic->pdev;
2275 roce->ae_algo = nic->ae_algo;
2276 roce->numa_node_mask = nic->numa_node_mask;
2281 static int hclge_init_msi(struct hclge_dev *hdev)
2283 struct pci_dev *pdev = hdev->pdev;
2287 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2288 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2291 "failed(%d) to allocate MSI/MSI-X vectors\n",
2295 if (vectors < hdev->num_msi)
2296 dev_warn(&hdev->pdev->dev,
2297 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2298 hdev->num_msi, vectors);
2300 hdev->num_msi = vectors;
2301 hdev->num_msi_left = vectors;
2302 hdev->base_msi_vector = pdev->irq;
2303 hdev->roce_base_vector = hdev->base_msi_vector +
2304 hdev->roce_base_msix_offset;
2306 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2307 sizeof(u16), GFP_KERNEL);
2308 if (!hdev->vector_status) {
2309 pci_free_irq_vectors(pdev);
2313 for (i = 0; i < hdev->num_msi; i++)
2314 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2316 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2317 sizeof(int), GFP_KERNEL);
2318 if (!hdev->vector_irq) {
2319 pci_free_irq_vectors(pdev);
2326 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2328 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2329 duplex = HCLGE_MAC_FULL;
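/* A minimal illustration (hypothetical snippet, assuming the
 * HCLGE_MAC_HALF counterpart defined alongside HCLGE_MAC_FULL): only
 * 10M and 100M links may keep half duplex; every faster speed is
 * coerced to full duplex:
 */
#if 0
	u8 d;

	d = hclge_check_speed_dup(HCLGE_MAC_HALF, HCLGE_MAC_SPEED_100M);
	/* d == HCLGE_MAC_HALF: half duplex is legal at 100M */
	d = hclge_check_speed_dup(HCLGE_MAC_HALF, HCLGE_MAC_SPEED_1G);
	/* d == HCLGE_MAC_FULL: forced to full duplex at 1G and above */
#endif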
2334 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2337 struct hclge_config_mac_speed_dup_cmd *req;
2338 struct hclge_desc desc;
2341 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2343 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2346 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2349 case HCLGE_MAC_SPEED_10M:
2350 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2351 HCLGE_CFG_SPEED_S, 6);
2353 case HCLGE_MAC_SPEED_100M:
2354 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2355 HCLGE_CFG_SPEED_S, 7);
2357 case HCLGE_MAC_SPEED_1G:
2358 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2359 HCLGE_CFG_SPEED_S, 0);
2361 case HCLGE_MAC_SPEED_10G:
2362 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2363 HCLGE_CFG_SPEED_S, 1);
2365 case HCLGE_MAC_SPEED_25G:
2366 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2367 HCLGE_CFG_SPEED_S, 2);
2369 case HCLGE_MAC_SPEED_40G:
2370 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2371 HCLGE_CFG_SPEED_S, 3);
2373 case HCLGE_MAC_SPEED_50G:
2374 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2375 HCLGE_CFG_SPEED_S, 4);
2377 case HCLGE_MAC_SPEED_100G:
2378 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2379 HCLGE_CFG_SPEED_S, 5);
2382 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2386 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2389 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2391 dev_err(&hdev->pdev->dev,
2392 "mac speed/duplex config cmd failed %d.\n", ret);
2399 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2403 duplex = hclge_check_speed_dup(duplex, speed);
2404 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2407 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2411 hdev->hw.mac.speed = speed;
2412 hdev->hw.mac.duplex = duplex;
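/* Minimal usage sketch (hypothetical caller, not part of the driver):
 * the cached speed/duplex in hdev->hw.mac makes the call a no-op when
 * nothing changed, so callers may invoke it unconditionally.
 */
#if 0
static int example_force_25g(struct hclge_dev *hdev)
{
	return hclge_cfg_mac_speed_dup(hdev, HCLGE_MAC_SPEED_25G,
				       HCLGE_MAC_FULL);
}
#endif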
2417 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2420 struct hclge_vport *vport = hclge_get_vport(handle);
2421 struct hclge_dev *hdev = vport->back;
2423 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2426 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2428 struct hclge_config_auto_neg_cmd *req;
2429 struct hclge_desc desc;
2433 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2435 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2437 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2438 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2440 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2442 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2448 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2450 struct hclge_vport *vport = hclge_get_vport(handle);
2451 struct hclge_dev *hdev = vport->back;
2453 if (!hdev->hw.mac.support_autoneg) {
2455 dev_err(&hdev->pdev->dev,
2456 "autoneg is not supported by current port\n");
2463 return hclge_set_autoneg_en(hdev, enable);
2466 static int hclge_get_autoneg(struct hnae3_handle *handle)
2468 struct hclge_vport *vport = hclge_get_vport(handle);
2469 struct hclge_dev *hdev = vport->back;
2470 struct phy_device *phydev = hdev->hw.mac.phydev;
2473 return phydev->autoneg;
2475 return hdev->hw.mac.autoneg;
2478 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2480 struct hclge_vport *vport = hclge_get_vport(handle);
2481 struct hclge_dev *hdev = vport->back;
2484 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2486 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2489 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2492 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2494 struct hclge_vport *vport = hclge_get_vport(handle);
2495 struct hclge_dev *hdev = vport->back;
2497 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2498 return hclge_set_autoneg_en(hdev, !halt);
2503 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2505 struct hclge_config_fec_cmd *req;
2506 struct hclge_desc desc;
2509 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2511 req = (struct hclge_config_fec_cmd *)desc.data;
2512 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2513 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2514 if (fec_mode & BIT(HNAE3_FEC_RS))
2515 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2516 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2517 if (fec_mode & BIT(HNAE3_FEC_BASER))
2518 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2519 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2521 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2523 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2528 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2530 struct hclge_vport *vport = hclge_get_vport(handle);
2531 struct hclge_dev *hdev = vport->back;
2532 struct hclge_mac *mac = &hdev->hw.mac;
2535 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2536 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2540 ret = hclge_set_fec_hw(hdev, fec_mode);
2544 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2548 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2551 struct hclge_vport *vport = hclge_get_vport(handle);
2552 struct hclge_dev *hdev = vport->back;
2553 struct hclge_mac *mac = &hdev->hw.mac;
2556 *fec_ability = mac->fec_ability;
2558 *fec_mode = mac->fec_mode;
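/* Sketch (hypothetical): fec_mode is a bitmap of HNAE3_FEC_* bits and
 * is validated against mac->fec_ability, so requesting RS FEC from a
 * handle could look like:
 */
#if 0
static int example_request_rs_fec(struct hnae3_handle *handle)
{
	return hclge_set_fec(handle, BIT(HNAE3_FEC_RS));
}
#endif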
2561 static int hclge_mac_init(struct hclge_dev *hdev)
2563 struct hclge_mac *mac = &hdev->hw.mac;
2566 hdev->support_sfp_query = true;
2567 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2568 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2569 hdev->hw.mac.duplex);
2571 dev_err(&hdev->pdev->dev,
2572 "Config mac speed dup fail ret=%d\n", ret);
2576 if (hdev->hw.mac.support_autoneg) {
2577 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2579 dev_err(&hdev->pdev->dev,
2580 "Config mac autoneg fail ret=%d\n", ret);
2587 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2588 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2590 dev_err(&hdev->pdev->dev,
2591 "Fec mode init fail, ret = %d\n", ret);
2596 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2598 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2602 ret = hclge_buffer_alloc(hdev);
2604 dev_err(&hdev->pdev->dev,
2605 "allocate buffer fail, ret=%d\n", ret);
2610 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2612 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2613 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2614 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2615 &hdev->mbx_service_task);
2618 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2620 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2621 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2622 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2623 &hdev->rst_service_task);
2626 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2628 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2629 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2630 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2631 hdev->hw_stats.stats_timer++;
2632 hdev->fd_arfs_expire_timer++;
2633 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2634 system_wq, &hdev->service_task,
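/* All three schedule helpers above share the same pattern: a state bit
 * (test_and_set_bit) deduplicates pending work, and the work is queued
 * on the first CPU of hdev->affinity_mask so the service, mailbox and
 * reset tasks follow the misc vector's IRQ affinity.
 */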
2639 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2641 struct hclge_link_status_cmd *req;
2642 struct hclge_desc desc;
2646 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2647 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2649 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2654 req = (struct hclge_link_status_cmd *)desc.data;
2655 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2657 return !!link_status;
2660 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2662 unsigned int mac_state;
2665 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2668 mac_state = hclge_get_mac_link_status(hdev);
2670 if (hdev->hw.mac.phydev) {
2671 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2672 link_stat = mac_state &
2673 hdev->hw.mac.phydev->link;
2678 link_stat = mac_state;
2684 static void hclge_update_link_status(struct hclge_dev *hdev)
2686 struct hnae3_client *rclient = hdev->roce_client;
2687 struct hnae3_client *client = hdev->nic_client;
2688 struct hnae3_handle *rhandle;
2689 struct hnae3_handle *handle;
2695 state = hclge_get_mac_phy_link(hdev);
2696 if (state != hdev->hw.mac.link) {
2697 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2698 handle = &hdev->vport[i].nic;
2699 client->ops->link_status_change(handle, state);
2700 hclge_config_mac_tnl_int(hdev, state);
2701 rhandle = &hdev->vport[i].roce;
2702 if (rclient && rclient->ops->link_status_change)
2703 rclient->ops->link_status_change(rhandle,
2706 hdev->hw.mac.link = state;
2710 static void hclge_update_port_capability(struct hclge_mac *mac)
2712 /* update fec ability by speed */
2713 hclge_convert_setting_fec(mac);
2715 /* firmware cannot identify the backplane type; the media type
2716 * read from the configuration can help to handle it
2718 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2719 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2720 mac->module_type = HNAE3_MODULE_TYPE_KR;
2721 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2722 mac->module_type = HNAE3_MODULE_TYPE_TP;
2724 if (mac->support_autoneg) {
2725 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2726 linkmode_copy(mac->advertising, mac->supported);
2728 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2730 linkmode_zero(mac->advertising);
2734 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2736 struct hclge_sfp_info_cmd *resp;
2737 struct hclge_desc desc;
2740 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2741 resp = (struct hclge_sfp_info_cmd *)desc.data;
2742 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2743 if (ret == -EOPNOTSUPP) {
2744 dev_warn(&hdev->pdev->dev,
2745 "IMP do not support get SFP speed %d\n", ret);
2748 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2752 *speed = le32_to_cpu(resp->speed);
2757 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2759 struct hclge_sfp_info_cmd *resp;
2760 struct hclge_desc desc;
2763 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2764 resp = (struct hclge_sfp_info_cmd *)desc.data;
2766 resp->query_type = QUERY_ACTIVE_SPEED;
2768 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2769 if (ret == -EOPNOTSUPP) {
2770 dev_warn(&hdev->pdev->dev,
2771 "IMP does not support get SFP info %d\n", ret);
2774 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2778 mac->speed = le32_to_cpu(resp->speed);
2779 /* if resp->speed_ability is 0, it means this is an old firmware
2780 * version, so do not update these parameters
2782 if (resp->speed_ability) {
2783 mac->module_type = le32_to_cpu(resp->module_type);
2784 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2785 mac->autoneg = resp->autoneg;
2786 mac->support_autoneg = resp->autoneg_ability;
2787 mac->speed_type = QUERY_ACTIVE_SPEED;
2788 if (!resp->active_fec)
2791 mac->fec_mode = BIT(resp->active_fec);
2793 mac->speed_type = QUERY_SFP_SPEED;
2799 static int hclge_update_port_info(struct hclge_dev *hdev)
2801 struct hclge_mac *mac = &hdev->hw.mac;
2802 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2805 /* get the port info from SFP cmd if not copper port */
2806 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2809 /* if IMP does not support getting SFP/qSFP info, return directly */
2810 if (!hdev->support_sfp_query)
2813 if (hdev->pdev->revision >= 0x21)
2814 ret = hclge_get_sfp_info(hdev, mac);
2816 ret = hclge_get_sfp_speed(hdev, &speed);
2818 if (ret == -EOPNOTSUPP) {
2819 hdev->support_sfp_query = false;
2825 if (hdev->pdev->revision >= 0x21) {
2826 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2827 hclge_update_port_capability(mac);
2830 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2833 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2834 return 0; /* do nothing if no SFP */
2836 /* must config full duplex for SFP */
2837 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2841 static int hclge_get_status(struct hnae3_handle *handle)
2843 struct hclge_vport *vport = hclge_get_vport(handle);
2844 struct hclge_dev *hdev = vport->back;
2846 hclge_update_link_status(hdev);
2848 return hdev->hw.mac.link;
2851 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2853 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2855 /* fetch the events from their corresponding regs */
2856 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2857 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2858 msix_src_reg = hclge_read_dev(&hdev->hw,
2859 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2861 /* Assumption: If by any chance reset and mailbox events are reported
2862 * together, then we will only process the reset event in this go and
2863 * defer the processing of the mailbox events. Since we would not have
2864 * cleared the RX CMDQ event this time, we would receive another
2865 * interrupt from H/W just for the mailbox.
2867 * check for vector0 reset event sources
2869 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2870 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2871 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2872 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2873 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2874 hdev->rst_stats.imp_rst_cnt++;
2875 return HCLGE_VECTOR0_EVENT_RST;
2878 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2879 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2880 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2881 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2882 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2883 hdev->rst_stats.global_rst_cnt++;
2884 return HCLGE_VECTOR0_EVENT_RST;
2887 /* check for vector0 msix event source */
2888 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2889 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2891 *clearval = msix_src_reg;
2892 return HCLGE_VECTOR0_EVENT_ERR;
2895 /* check for vector0 mailbox(=CMDQ RX) event source */
2896 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2897 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2898 *clearval = cmdq_src_reg;
2899 return HCLGE_VECTOR0_EVENT_MBX;
2902 /* print other vector0 event source */
2903 dev_info(&hdev->pdev->dev,
2904 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2905 cmdq_src_reg, msix_src_reg);
2906 *clearval = msix_src_reg;
2908 return HCLGE_VECTOR0_EVENT_OTHER;
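/* Priority of the vector0 event decoding above: IMP reset, then global
 * reset, then MSI-X (hardware error), then mailbox; anything else is
 * reported as OTHER. *clearval tells the caller which source bits to
 * acknowledge for the returned event type.
 */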
2911 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2914 switch (event_type) {
2915 case HCLGE_VECTOR0_EVENT_RST:
2916 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2918 case HCLGE_VECTOR0_EVENT_MBX:
2919 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2926 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2928 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2929 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2930 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2931 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2932 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2935 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2937 writel(enable ? 1 : 0, vector->addr);
2940 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2942 struct hclge_dev *hdev = data;
2946 hclge_enable_vector(&hdev->misc_vector, false);
2947 event_cause = hclge_check_event_cause(hdev, &clearval);
2949 /* vector 0 interrupt is shared with reset and mailbox source events. */
2950 switch (event_cause) {
2951 case HCLGE_VECTOR0_EVENT_ERR:
2952 /* we do not know what type of reset is required now. This could
2953 * only be decided after we fetch the type of errors which
2954 * caused this event. Therefore, we will do the below for now:
2955 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2956 * have deferred the type of reset to be used.
2957 * 2. Schedule the reset service task.
2958 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2959 * will fetch the correct type of reset. This would be done
2960 * by first decoding the types of errors.
2962 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2964 case HCLGE_VECTOR0_EVENT_RST:
2965 hclge_reset_task_schedule(hdev);
2967 case HCLGE_VECTOR0_EVENT_MBX:
2968 /* If we are here then,
2969 * 1. Either we are not handling any mbx task and we are not
2972 * 2. We could be handling an mbx task but nothing more is
2974 * In both cases, we should schedule mbx task as there are more
2975 * mbx messages reported by this interrupt.
2977 hclge_mbx_task_schedule(hdev);
2980 dev_warn(&hdev->pdev->dev,
2981 "received unknown or unhandled event of vector0\n");
2985 hclge_clear_event_cause(hdev, event_cause, clearval);
2987 /* Enable interrupt if it is not caused by reset. And when
2988 * clearval equals 0, it means the interrupt status may have been
2989 * cleared by hardware before the driver read the status register.
2990 * In this case, the vector0 interrupt should also be enabled.
2993 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2994 hclge_enable_vector(&hdev->misc_vector, true);
3000 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3002 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3003 dev_warn(&hdev->pdev->dev,
3004 "vector(vector_id %d) has been freed.\n", vector_id);
3008 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3009 hdev->num_msi_left += 1;
3010 hdev->num_msi_used -= 1;
3013 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3015 struct hclge_misc_vector *vector = &hdev->misc_vector;
3017 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3019 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3020 hdev->vector_status[0] = 0;
3022 hdev->num_msi_left -= 1;
3023 hdev->num_msi_used += 1;
3026 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3027 const cpumask_t *mask)
3029 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3032 cpumask_copy(&hdev->affinity_mask, mask);
3035 static void hclge_irq_affinity_release(struct kref *ref)
3039 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3041 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3042 &hdev->affinity_mask);
3044 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3045 hdev->affinity_notify.release = hclge_irq_affinity_release;
3046 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3047 &hdev->affinity_notify);
3050 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3052 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3053 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3056 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3060 hclge_get_misc_vector(hdev);
3062 /* this would be explicitly freed in the end */
3063 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3064 0, "hclge_misc", hdev);
3066 hclge_free_vector(hdev, 0);
3067 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3068 hdev->misc_vector.vector_irq);
3074 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3076 free_irq(hdev->misc_vector.vector_irq, hdev);
3077 hclge_free_vector(hdev, 0);
3080 int hclge_notify_client(struct hclge_dev *hdev,
3081 enum hnae3_reset_notify_type type)
3083 struct hnae3_client *client = hdev->nic_client;
3086 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3089 if (!client->ops->reset_notify)
3092 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3093 struct hnae3_handle *handle = &hdev->vport[i].nic;
3096 ret = client->ops->reset_notify(handle, type);
3098 dev_err(&hdev->pdev->dev,
3099 "notify nic client failed %d(%d)\n", type, ret);
3107 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3108 enum hnae3_reset_notify_type type)
3110 struct hnae3_client *client = hdev->roce_client;
3114 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3117 if (!client->ops->reset_notify)
3120 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3121 struct hnae3_handle *handle = &hdev->vport[i].roce;
3123 ret = client->ops->reset_notify(handle, type);
3125 dev_err(&hdev->pdev->dev,
3126 "notify roce client failed %d(%d)",
3135 static int hclge_reset_wait(struct hclge_dev *hdev)
3137 #define HCLGE_RESET_WAIT_MS 100
3138 #define HCLGE_RESET_WAIT_CNT 200
3139 u32 val, reg, reg_bit;
3142 switch (hdev->reset_type) {
3143 case HNAE3_IMP_RESET:
3144 reg = HCLGE_GLOBAL_RESET_REG;
3145 reg_bit = HCLGE_IMP_RESET_BIT;
3147 case HNAE3_GLOBAL_RESET:
3148 reg = HCLGE_GLOBAL_RESET_REG;
3149 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3151 case HNAE3_FUNC_RESET:
3152 reg = HCLGE_FUN_RST_ING;
3153 reg_bit = HCLGE_FUN_RST_ING_B;
3155 case HNAE3_FLR_RESET:
3158 dev_err(&hdev->pdev->dev,
3159 "Wait for unsupported reset type: %d\n",
3164 if (hdev->reset_type == HNAE3_FLR_RESET) {
3165 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3166 cnt++ < HCLGE_RESET_WAIT_CNT)
3167 msleep(HCLGE_RESET_WAIT_MS);
3169 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3170 dev_err(&hdev->pdev->dev,
3171 "flr wait timeout: %d\n", cnt);
3178 val = hclge_read_dev(&hdev->hw, reg);
3179 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3180 msleep(HCLGE_RESET_WAIT_MS);
3181 val = hclge_read_dev(&hdev->hw, reg);
3185 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3186 dev_warn(&hdev->pdev->dev,
3187 "Wait for reset timeout: %d\n", hdev->reset_type);
3194 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3196 struct hclge_vf_rst_cmd *req;
3197 struct hclge_desc desc;
3199 req = (struct hclge_vf_rst_cmd *)desc.data;
3200 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3201 req->dest_vfid = func_id;
3206 return hclge_cmd_send(&hdev->hw, &desc, 1);
3209 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3213 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3214 struct hclge_vport *vport = &hdev->vport[i];
3217 /* Send cmd to set/clear VF's FUNC_RST_ING */
3218 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3220 dev_err(&hdev->pdev->dev,
3221 "set vf(%d) rst failed %d!\n",
3222 vport->vport_id, ret);
3226 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3229 /* Inform VF to process the reset.
3230 * hclge_inform_reset_assert_to_vf may fail if the VF
3231 * driver is not loaded.
3233 ret = hclge_inform_reset_assert_to_vf(vport);
3235 dev_warn(&hdev->pdev->dev,
3236 "inform reset to vf(%d) failed %d!\n",
3237 vport->vport_id, ret);
3243 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3245 struct hclge_pf_rst_sync_cmd *req;
3246 struct hclge_desc desc;
3250 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3251 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3254 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3255 /* for compatibility with old firmware, wait
3256 * 100 ms for the VF to stop IO
3258 if (ret == -EOPNOTSUPP) {
3259 msleep(HCLGE_RESET_SYNC_TIME);
3262 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3265 } else if (req->all_vf_ready) {
3268 msleep(HCLGE_PF_RESET_SYNC_TIME);
3269 hclge_cmd_reuse_desc(&desc, true);
3270 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3272 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3276 void hclge_report_hw_error(struct hclge_dev *hdev,
3277 enum hnae3_hw_error_type type)
3279 struct hnae3_client *client = hdev->nic_client;
3282 if (!client || !client->ops->process_hw_error ||
3283 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3286 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3287 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3290 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3294 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3295 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3296 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3297 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3298 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3301 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3302 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3303 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3304 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3308 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3310 struct hclge_desc desc;
3311 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3314 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3315 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3316 req->fun_reset_vfid = func_id;
3318 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3320 dev_err(&hdev->pdev->dev,
3321 "send function reset cmd fail, status =%d\n", ret);
3326 static void hclge_do_reset(struct hclge_dev *hdev)
3328 struct hnae3_handle *handle = &hdev->vport[0].nic;
3329 struct pci_dev *pdev = hdev->pdev;
3332 if (hclge_get_hw_reset_stat(handle)) {
3333 dev_info(&pdev->dev, "Hardware reset not finish\n");
3334 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3335 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3336 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3340 switch (hdev->reset_type) {
3341 case HNAE3_GLOBAL_RESET:
3342 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3343 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3344 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3345 dev_info(&pdev->dev, "Global Reset requested\n");
3347 case HNAE3_FUNC_RESET:
3348 dev_info(&pdev->dev, "PF Reset requested\n");
3349 /* schedule again to check later */
3350 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3351 hclge_reset_task_schedule(hdev);
3353 case HNAE3_FLR_RESET:
3354 dev_info(&pdev->dev, "FLR requested\n");
3355 /* schedule again to check later */
3356 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3357 hclge_reset_task_schedule(hdev);
3360 dev_warn(&pdev->dev,
3361 "Unsupported reset type: %d\n", hdev->reset_type);
3366 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3367 unsigned long *addr)
3369 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3370 struct hclge_dev *hdev = ae_dev->priv;
3372 /* first, resolve any unknown reset type to the known type(s) */
3373 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3374 /* we will intentionally ignore any errors from this function
3375 * as we will end up in *some* reset request in any case
3377 hclge_handle_hw_msix_error(hdev, addr);
3378 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3379 /* We deferred the clearing of the error event which caused the
3380 * interrupt since it was not possible to do that in
3381 * interrupt context (and this is the reason we introduced the
3382 * new UNKNOWN reset type). Now that the errors have been
3383 * handled and cleared in hardware, we can safely enable
3384 * interrupts. This is an exception to the norm.
3386 hclge_enable_vector(&hdev->misc_vector, true);
3389 /* return the highest priority reset level amongst all */
3390 if (test_bit(HNAE3_IMP_RESET, addr)) {
3391 rst_level = HNAE3_IMP_RESET;
3392 clear_bit(HNAE3_IMP_RESET, addr);
3393 clear_bit(HNAE3_GLOBAL_RESET, addr);
3394 clear_bit(HNAE3_FUNC_RESET, addr);
3395 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3396 rst_level = HNAE3_GLOBAL_RESET;
3397 clear_bit(HNAE3_GLOBAL_RESET, addr);
3398 clear_bit(HNAE3_FUNC_RESET, addr);
3399 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3400 rst_level = HNAE3_FUNC_RESET;
3401 clear_bit(HNAE3_FUNC_RESET, addr);
3402 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3403 rst_level = HNAE3_FLR_RESET;
3404 clear_bit(HNAE3_FLR_RESET, addr);
3407 if (hdev->reset_type != HNAE3_NONE_RESET &&
3408 rst_level < hdev->reset_type)
3409 return HNAE3_NONE_RESET;
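/* Reset levels are resolved strictly by priority: IMP > global > func
 * > FLR. Taking a higher level also clears the lower pending bits,
 * since the bigger reset covers the smaller ones; and a level lower
 * than the reset already in progress is reported as NONE.
 */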
3414 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3418 switch (hdev->reset_type) {
3419 case HNAE3_IMP_RESET:
3420 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3422 case HNAE3_GLOBAL_RESET:
3423 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3432 /* For revision 0x20, the reset interrupt source
3433 * can only be cleared after the hardware reset is done
3435 if (hdev->pdev->revision == 0x20)
3436 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3439 hclge_enable_vector(&hdev->misc_vector, true);
3442 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3446 switch (hdev->reset_type) {
3447 case HNAE3_FUNC_RESET:
3449 case HNAE3_FLR_RESET:
3450 ret = hclge_set_all_vf_rst(hdev, true);
3459 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3463 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3465 reg_val |= HCLGE_NIC_SW_RST_RDY;
3467 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3469 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3472 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3477 switch (hdev->reset_type) {
3478 case HNAE3_FUNC_RESET:
3479 /* confirm whether all running VFs are ready
3480 * before requesting the PF reset
3482 ret = hclge_func_reset_sync_vf(hdev);
3486 ret = hclge_func_reset_cmd(hdev, 0);
3488 dev_err(&hdev->pdev->dev,
3489 "asserting function reset fail %d!\n", ret);
3493 /* After performing the PF reset, it is not necessary to do the
3494 * mailbox handling or send any command to firmware, because
3495 * any mailbox handling or command to firmware is only valid
3496 * after hclge_cmd_init is called.
3498 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3499 hdev->rst_stats.pf_rst_cnt++;
3501 case HNAE3_FLR_RESET:
3502 /* confirm whether all running VFs are ready
3503 * before requesting the PF reset
3505 ret = hclge_func_reset_sync_vf(hdev);
3509 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3510 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3511 hdev->rst_stats.flr_rst_cnt++;
3513 case HNAE3_IMP_RESET:
3514 hclge_handle_imp_error(hdev);
3515 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3516 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3517 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3523 /* inform hardware that preparatory work is done */
3524 msleep(HCLGE_RESET_SYNC_TIME);
3525 hclge_reset_handshake(hdev, true);
3526 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3531 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3533 #define MAX_RESET_FAIL_CNT 5
3535 if (hdev->reset_pending) {
3536 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3537 hdev->reset_pending);
3539 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3540 HCLGE_RESET_INT_M) {
3541 dev_info(&hdev->pdev->dev,
3542 "reset failed because new reset interrupt\n");
3543 hclge_clear_reset_cause(hdev);
3545 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3546 hdev->reset_fail_cnt++;
3547 set_bit(hdev->reset_type, &hdev->reset_pending);
3548 dev_info(&hdev->pdev->dev,
3549 "re-schedule reset task(%d)\n",
3550 hdev->reset_fail_cnt);
3554 hclge_clear_reset_cause(hdev);
3556 /* recover the handshake status when reset fails */
3557 hclge_reset_handshake(hdev, true);
3559 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3563 static int hclge_set_rst_done(struct hclge_dev *hdev)
3565 struct hclge_pf_rst_done_cmd *req;
3566 struct hclge_desc desc;
3568 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3570 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3572 return hclge_cmd_send(&hdev->hw, &desc, 1);
3575 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3579 switch (hdev->reset_type) {
3580 case HNAE3_FUNC_RESET:
3582 case HNAE3_FLR_RESET:
3583 ret = hclge_set_all_vf_rst(hdev, false);
3585 case HNAE3_GLOBAL_RESET:
3587 case HNAE3_IMP_RESET:
3588 ret = hclge_set_rst_done(hdev);
3594 /* clear up the handshake status after re-initialization is done */
3595 hclge_reset_handshake(hdev, false);
3600 static int hclge_reset_stack(struct hclge_dev *hdev)
3604 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3608 ret = hclge_reset_ae_dev(hdev->ae_dev);
3612 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3616 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3619 static void hclge_reset(struct hclge_dev *hdev)
3621 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3624 /* Initialize the ae_dev reset status as well, in case the enet layer
3625 * wants to know if the device is undergoing reset
3627 ae_dev->reset_type = hdev->reset_type;
3628 hdev->rst_stats.reset_cnt++;
3629 /* perform reset of the stack & ae device for a client */
3630 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3634 ret = hclge_reset_prepare_down(hdev);
3639 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3641 goto err_reset_lock;
3645 ret = hclge_reset_prepare_wait(hdev);
3649 if (hclge_reset_wait(hdev))
3652 hdev->rst_stats.hw_reset_done_cnt++;
3654 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3660 ret = hclge_reset_stack(hdev);
3662 goto err_reset_lock;
3664 hclge_clear_reset_cause(hdev);
3666 ret = hclge_reset_prepare_up(hdev);
3668 goto err_reset_lock;
3672 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3673 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3676 if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3681 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3683 goto err_reset_lock;
3687 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3691 hdev->last_reset_time = jiffies;
3692 hdev->reset_fail_cnt = 0;
3693 hdev->rst_stats.reset_done_cnt++;
3694 ae_dev->reset_type = HNAE3_NONE_RESET;
3696 /* if default_reset_request has a higher level reset request,
3697 * it should be handled as soon as possible, since some errors
3698 * need this kind of reset to be fixed.
3700 hdev->reset_level = hclge_get_reset_level(ae_dev,
3701 &hdev->default_reset_request);
3702 if (hdev->reset_level != HNAE3_NONE_RESET)
3703 set_bit(hdev->reset_level, &hdev->reset_request);
3710 if (hclge_reset_err_handle(hdev))
3711 hclge_reset_task_schedule(hdev);
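/* hclge_reset() runs the full state machine: notify the clients down,
 * prepare and assert the reset, wait for hardware completion, rebuild
 * the stack (uninit/init/restore), clear the cause, bring the clients
 * back up, and finally re-arm any higher-priority default reset
 * request. Any failure funnels into hclge_reset_err_handle() for
 * re-scheduling.
 */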
3714 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3716 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3717 struct hclge_dev *hdev = ae_dev->priv;
3719 /* We might end up getting called broadly because of the 2 cases below:
3720 * 1. A recoverable error was conveyed through APEI and the only way
3721 * to bring normalcy is to reset.
3722 * 2. A new reset request from the stack due to timeout
3724 * For the first case, the error event might not have an ae handle
3725 * available. Check if this is a new reset request and we are not here
3726 * just because the last reset attempt did not succeed and the watchdog
3727 * hit us again. We will know this if the last reset request did not
3728 * occur very recently (watchdog timer = 5 * HZ; let us check after a
3729 * sufficiently large time, say 4 * 5 * HZ).
3730 * In case of a new request we reset the "reset level" to PF reset.
3731 * And if it is a repeat of the most recent reset request then we
3732 * want to make sure we throttle it, so we will not allow it again before 3 * HZ elapses.
3735 handle = &hdev->vport[0].nic;
3737 if (time_before(jiffies, (hdev->last_reset_time +
3738 HCLGE_RESET_INTERVAL))) {
3739 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3741 } else if (hdev->default_reset_request)
3743 hclge_get_reset_level(ae_dev,
3744 &hdev->default_reset_request);
3745 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3746 hdev->reset_level = HNAE3_FUNC_RESET;
3748 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3751 /* request reset & schedule reset task */
3752 set_bit(hdev->reset_level, &hdev->reset_request);
3753 hclge_reset_task_schedule(hdev);
3755 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3756 hdev->reset_level++;
3759 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3760 enum hnae3_reset_type rst_type)
3762 struct hclge_dev *hdev = ae_dev->priv;
3764 set_bit(rst_type, &hdev->default_reset_request);
3767 static void hclge_reset_timer(struct timer_list *t)
3769 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3771 /* if default_reset_request has no value, it means that this reset
3772 * request has already been handled, so just return here
3774 if (!hdev->default_reset_request)
3777 dev_info(&hdev->pdev->dev,
3778 "triggering reset in reset timer\n");
3779 hclge_reset_event(hdev->pdev, NULL);
3782 static void hclge_reset_subtask(struct hclge_dev *hdev)
3784 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3786 /* check if there is any ongoing reset in the hardware. This status can
3787 * be checked from reset_pending. If there is one, we need to wait for
3788 * the hardware to complete the reset.
3789 * a. If we are able to figure out in a reasonable time that the
3790 * hardware has fully reset, then we can proceed with the driver, client
3792 * b. else, we can come back later to check this status, so re-sched
3795 hdev->last_reset_time = jiffies;
3796 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3797 if (hdev->reset_type != HNAE3_NONE_RESET)
3800 /* check if we got any *new* reset requests to be honored */
3801 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3802 if (hdev->reset_type != HNAE3_NONE_RESET)
3803 hclge_do_reset(hdev);
3805 hdev->reset_type = HNAE3_NONE_RESET;
3808 static void hclge_reset_service_task(struct work_struct *work)
3810 struct hclge_dev *hdev =
3811 container_of(work, struct hclge_dev, rst_service_task);
3813 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3816 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3818 hclge_reset_subtask(hdev);
3820 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3823 static void hclge_mailbox_service_task(struct work_struct *work)
3825 struct hclge_dev *hdev =
3826 container_of(work, struct hclge_dev, mbx_service_task);
3828 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3831 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3833 hclge_mbx_handler(hdev);
3835 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3838 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3842 /* start from vport 1, since the PF is always alive */
3843 for (i = 1; i < hdev->num_alloc_vport; i++) {
3844 struct hclge_vport *vport = &hdev->vport[i];
3846 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3847 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3849 /* If the VF is not alive, set to the default value */
3850 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3851 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3855 static void hclge_service_task(struct work_struct *work)
3857 struct hclge_dev *hdev =
3858 container_of(work, struct hclge_dev, service_task.work);
3860 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3862 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3863 hclge_update_stats_for_all(hdev);
3864 hdev->hw_stats.stats_timer = 0;
3867 hclge_update_port_info(hdev);
3868 hclge_update_link_status(hdev);
3869 hclge_update_vport_alive(hdev);
3870 hclge_sync_vlan_filter(hdev);
3871 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3872 hclge_rfs_filter_expire(hdev);
3873 hdev->fd_arfs_expire_timer = 0;
3876 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3879 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3881 /* VF handle has no client */
3882 if (!handle->client)
3883 return container_of(handle, struct hclge_vport, nic);
3884 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3885 return container_of(handle, struct hclge_vport, roce);
3887 return container_of(handle, struct hclge_vport, nic);
3890 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3891 struct hnae3_vector_info *vector_info)
3893 struct hclge_vport *vport = hclge_get_vport(handle);
3894 struct hnae3_vector_info *vector = vector_info;
3895 struct hclge_dev *hdev = vport->back;
3899 vector_num = min(hdev->num_msi_left, vector_num);
3901 for (j = 0; j < vector_num; j++) {
3902 for (i = 1; i < hdev->num_msi; i++) {
3903 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3904 vector->vector = pci_irq_vector(hdev->pdev, i);
3905 vector->io_addr = hdev->hw.io_base +
3906 HCLGE_VECTOR_REG_BASE +
3907 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3909 HCLGE_VECTOR_VF_OFFSET;
3910 hdev->vector_status[i] = vport->vport_id;
3911 hdev->vector_irq[i] = vector->vector;
3920 hdev->num_msi_left -= alloc;
3921 hdev->num_msi_used += alloc;
3926 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3930 for (i = 0; i < hdev->num_msi; i++)
3931 if (vector == hdev->vector_irq[i])
3937 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3939 struct hclge_vport *vport = hclge_get_vport(handle);
3940 struct hclge_dev *hdev = vport->back;
3943 vector_id = hclge_get_vector_index(hdev, vector);
3944 if (vector_id < 0) {
3945 dev_err(&hdev->pdev->dev,
3946 "Get vector index fail. vector_id =%d\n", vector_id);
3950 hclge_free_vector(hdev, vector_id);
3955 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3957 return HCLGE_RSS_KEY_SIZE;
3960 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3962 return HCLGE_RSS_IND_TBL_SIZE;
3965 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3966 const u8 hfunc, const u8 *key)
3968 struct hclge_rss_config_cmd *req;
3969 unsigned int key_offset = 0;
3970 struct hclge_desc desc;
3975 key_counts = HCLGE_RSS_KEY_SIZE;
3976 req = (struct hclge_rss_config_cmd *)desc.data;
3978 while (key_counts) {
3979 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3982 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3983 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3985 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3986 memcpy(req->hash_key,
3987 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3989 key_counts -= key_size;
3991 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3993 dev_err(&hdev->pdev->dev,
3994 "Configure RSS config fail, status = %d\n",
4002 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4004 struct hclge_rss_indirection_table_cmd *req;
4005 struct hclge_desc desc;
4009 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4011 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4012 hclge_cmd_setup_basic_desc
4013 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4015 req->start_table_index =
4016 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4017 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4019 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4020 req->rss_result[j] =
4021 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4023 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4025 dev_err(&hdev->pdev->dev,
4026 "Configure rss indir table fail,status = %d\n",
4034 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4035 u16 *tc_size, u16 *tc_offset)
4037 struct hclge_rss_tc_mode_cmd *req;
4038 struct hclge_desc desc;
4042 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4043 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4045 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4048 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4049 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4050 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4051 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4052 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4054 req->rss_tc_mode[i] = cpu_to_le16(mode);
4057 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4059 dev_err(&hdev->pdev->dev,
4060 "Configure rss tc mode fail, status = %d\n", ret);
4065 static void hclge_get_rss_type(struct hclge_vport *vport)
4067 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4068 vport->rss_tuple_sets.ipv4_udp_en ||
4069 vport->rss_tuple_sets.ipv4_sctp_en ||
4070 vport->rss_tuple_sets.ipv6_tcp_en ||
4071 vport->rss_tuple_sets.ipv6_udp_en ||
4072 vport->rss_tuple_sets.ipv6_sctp_en)
4073 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4074 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4075 vport->rss_tuple_sets.ipv6_fragment_en)
4076 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4078 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4081 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4083 struct hclge_rss_input_tuple_cmd *req;
4084 struct hclge_desc desc;
4087 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4089 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4091 /* Get the tuple cfg from pf */
4092 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4093 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4094 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4095 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4096 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4097 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4098 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4099 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4100 hclge_get_rss_type(&hdev->vport[0]);
4101 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4103 dev_err(&hdev->pdev->dev,
4104 "Configure rss input fail, status = %d\n", ret);
4108 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4111 struct hclge_vport *vport = hclge_get_vport(handle);
4114 /* Get hash algorithm */
4116 switch (vport->rss_algo) {
4117 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4118 *hfunc = ETH_RSS_HASH_TOP;
4120 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4121 *hfunc = ETH_RSS_HASH_XOR;
4124 *hfunc = ETH_RSS_HASH_UNKNOWN;
4129 /* Get the RSS Key required by the user */
4131 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4133 /* Get indirect table */
4135 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4136 indir[i] = vport->rss_indirection_tbl[i];
4141 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4142 const u8 *key, const u8 hfunc)
4144 struct hclge_vport *vport = hclge_get_vport(handle);
4145 struct hclge_dev *hdev = vport->back;
4149 /* Set the RSS Hash Key if specified by the user */
4152 case ETH_RSS_HASH_TOP:
4153 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4155 case ETH_RSS_HASH_XOR:
4156 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4158 case ETH_RSS_HASH_NO_CHANGE:
4159 hash_algo = vport->rss_algo;
4165 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4169 /* Update the shadow RSS key with the user specified key */
4170 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4171 vport->rss_algo = hash_algo;
4174 /* Update the shadow RSS table with user specified qids */
4175 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4176 vport->rss_indirection_tbl[i] = indir[i];
4178 /* Update the hardware */
4179 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
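/* Usage sketch (hypothetical caller): the ethtool hash function
 * constants map onto the hardware algorithms as ETH_RSS_HASH_TOP ->
 * Toeplitz and ETH_RSS_HASH_XOR -> simple XOR, so an ethtool-driven
 * update could be:
 */
#if 0
static int example_set_toeplitz(struct hnae3_handle *handle,
				const u32 *indir, const u8 *key)
{
	return hclge_set_rss(handle, indir, key, ETH_RSS_HASH_TOP);
}
#endif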
4182 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4184 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4186 if (nfc->data & RXH_L4_B_2_3)
4187 hash_sets |= HCLGE_D_PORT_BIT;
4189 hash_sets &= ~HCLGE_D_PORT_BIT;
4191 if (nfc->data & RXH_IP_SRC)
4192 hash_sets |= HCLGE_S_IP_BIT;
4194 hash_sets &= ~HCLGE_S_IP_BIT;
4196 if (nfc->data & RXH_IP_DST)
4197 hash_sets |= HCLGE_D_IP_BIT;
4199 hash_sets &= ~HCLGE_D_IP_BIT;
4201 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4202 hash_sets |= HCLGE_V_TAG_BIT;
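/* Worked example: with nfc->data carrying RXH_IP_SRC | RXH_IP_DST |
 * RXH_L4_B_0_1 | RXH_L4_B_2_3, the result is HCLGE_S_IP_BIT |
 * HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT; SCTP flows
 * additionally get HCLGE_V_TAG_BIT.
 */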
4207 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4208 struct ethtool_rxnfc *nfc)
4210 struct hclge_vport *vport = hclge_get_vport(handle);
4211 struct hclge_dev *hdev = vport->back;
4212 struct hclge_rss_input_tuple_cmd *req;
4213 struct hclge_desc desc;
4217 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4218 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4221 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4222 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4224 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4225 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4226 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4227 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4228 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4229 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4230 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4231 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4233 tuple_sets = hclge_get_rss_hash_bits(nfc);
4234 switch (nfc->flow_type) {
4236 req->ipv4_tcp_en = tuple_sets;
4239 req->ipv6_tcp_en = tuple_sets;
4242 req->ipv4_udp_en = tuple_sets;
4245 req->ipv6_udp_en = tuple_sets;
4248 req->ipv4_sctp_en = tuple_sets;
4251 if ((nfc->data & RXH_L4_B_0_1) ||
4252 (nfc->data & RXH_L4_B_2_3))
4255 req->ipv6_sctp_en = tuple_sets;
4258 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4261 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4267 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4269 dev_err(&hdev->pdev->dev,
4270 "Set rss tuple fail, status = %d\n", ret);
4274 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4275 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4276 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4277 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4278 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4279 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4280 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4281 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4282 hclge_get_rss_type(vport);
4286 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4287 struct ethtool_rxnfc *nfc)
4289 struct hclge_vport *vport = hclge_get_vport(handle);
4294 switch (nfc->flow_type) {
4296 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4299 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4302 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4305 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4308 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4311 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4315 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4324 if (tuple_sets & HCLGE_D_PORT_BIT)
4325 nfc->data |= RXH_L4_B_2_3;
4326 if (tuple_sets & HCLGE_S_PORT_BIT)
4327 nfc->data |= RXH_L4_B_0_1;
4328 if (tuple_sets & HCLGE_D_IP_BIT)
4329 nfc->data |= RXH_IP_DST;
4330 if (tuple_sets & HCLGE_S_IP_BIT)
4331 nfc->data |= RXH_IP_SRC;
4336 static int hclge_get_tc_size(struct hnae3_handle *handle)
4338 struct hclge_vport *vport = hclge_get_vport(handle);
4339 struct hclge_dev *hdev = vport->back;
4341 return hdev->rss_size_max;
4344 int hclge_rss_init_hw(struct hclge_dev *hdev)
4346 struct hclge_vport *vport = hdev->vport;
4347 u8 *rss_indir = vport[0].rss_indirection_tbl;
4348 u16 rss_size = vport[0].alloc_rss_size;
4349 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4350 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4351 u8 *key = vport[0].rss_hash_key;
4352 u8 hfunc = vport[0].rss_algo;
4353 u16 tc_valid[HCLGE_MAX_TC_NUM];
4358 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4362 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4366 ret = hclge_set_rss_input_tuple(hdev);
4370 /* Each TC has the same queue size; the tc_size set to hardware is
4371 * the log2 of the roundup power of two of rss_size, and the actual
4372 * queue size is limited by the indirection table.
4374 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4375 dev_err(&hdev->pdev->dev,
4376 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4381 roundup_size = roundup_pow_of_two(rss_size);
4382 roundup_size = ilog2(roundup_size);
4384 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4387 if (!(hdev->hw_tc_map & BIT(i)))
4391 tc_size[i] = roundup_size;
4392 tc_offset[i] = rss_size * i;
4395 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
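/* Worked example: with rss_size = 16 per TC, roundup_pow_of_two(16) =
 * 16 and ilog2(16) = 4, so tc_size[i] = 4 and tc_offset[i] = 16 * i
 * for every TC present in hdev->hw_tc_map.
 */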
4398 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4400 struct hclge_vport *vport = hdev->vport;
4403 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4404 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4405 vport[j].rss_indirection_tbl[i] =
4406 i % vport[j].alloc_rss_size;
4410 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4412 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4413 struct hclge_vport *vport = hdev->vport;
4415 if (hdev->pdev->revision >= 0x21)
4416 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4418 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4419 vport[i].rss_tuple_sets.ipv4_tcp_en =
4420 HCLGE_RSS_INPUT_TUPLE_OTHER;
4421 vport[i].rss_tuple_sets.ipv4_udp_en =
4422 HCLGE_RSS_INPUT_TUPLE_OTHER;
4423 vport[i].rss_tuple_sets.ipv4_sctp_en =
4424 HCLGE_RSS_INPUT_TUPLE_SCTP;
4425 vport[i].rss_tuple_sets.ipv4_fragment_en =
4426 HCLGE_RSS_INPUT_TUPLE_OTHER;
4427 vport[i].rss_tuple_sets.ipv6_tcp_en =
4428 HCLGE_RSS_INPUT_TUPLE_OTHER;
4429 vport[i].rss_tuple_sets.ipv6_udp_en =
4430 HCLGE_RSS_INPUT_TUPLE_OTHER;
4431 vport[i].rss_tuple_sets.ipv6_sctp_en =
4432 HCLGE_RSS_INPUT_TUPLE_SCTP;
4433 vport[i].rss_tuple_sets.ipv6_fragment_en =
4434 HCLGE_RSS_INPUT_TUPLE_OTHER;
4436 vport[i].rss_algo = rss_algo;
4438 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4439 HCLGE_RSS_KEY_SIZE);
4442 hclge_rss_indir_init_cfg(hdev);
4445 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4446 int vector_id, bool en,
4447 struct hnae3_ring_chain_node *ring_chain)
4449 struct hclge_dev *hdev = vport->back;
4450 struct hnae3_ring_chain_node *node;
4451 struct hclge_desc desc;
4452 struct hclge_ctrl_vector_chain_cmd *req =
4453 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4454 enum hclge_cmd_status status;
4455 enum hclge_opcode_type op;
4456 u16 tqp_type_and_id;
4459 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4460 hclge_cmd_setup_basic_desc(&desc, op, false);
4461 req->int_vector_id = vector_id;
4464 for (node = ring_chain; node; node = node->next) {
4465 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4466 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4467 HCLGE_INT_TYPE_S,
4468 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4469 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4470 HCLGE_TQP_ID_S, node->tqp_index);
4471 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4472 HCLGE_INT_GL_IDX_S,
4473 hnae3_get_field(node->int_gl_idx,
4474 HNAE3_RING_GL_IDX_M,
4475 HNAE3_RING_GL_IDX_S));
4476 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4477 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4478 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4479 req->vfid = vport->vport_id;
4481 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4483 dev_err(&hdev->pdev->dev,
4484 "Map TQP fail, status is %d.\n",
4490 hclge_cmd_setup_basic_desc(&desc, op, false);
4493 req->int_vector_id = vector_id;
4498 req->int_cause_num = i;
4499 req->vfid = vport->vport_id;
4500 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4502 dev_err(&hdev->pdev->dev,
4503 "Map TQP fail, status is %d.\n", status);
4511 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4512 struct hnae3_ring_chain_node *ring_chain)
4514 struct hclge_vport *vport = hclge_get_vport(handle);
4515 struct hclge_dev *hdev = vport->back;
4518 vector_id = hclge_get_vector_index(hdev, vector);
4519 if (vector_id < 0) {
4520 dev_err(&hdev->pdev->dev,
4521 "Get vector index fail. vector_id =%d\n", vector_id);
4525 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4528 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4529 struct hnae3_ring_chain_node *ring_chain)
4531 struct hclge_vport *vport = hclge_get_vport(handle);
4532 struct hclge_dev *hdev = vport->back;
4535 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4538 vector_id = hclge_get_vector_index(hdev, vector);
4539 if (vector_id < 0) {
4540 dev_err(&handle->pdev->dev,
4541 "Get vector index fail. ret =%d\n", vector_id);
4545 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4547 dev_err(&handle->pdev->dev,
4548 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4554 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4555 struct hclge_promisc_param *param)
4557 struct hclge_promisc_cfg_cmd *req;
4558 struct hclge_desc desc;
4561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4563 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4564 req->vf_id = param->vf_id;
4566 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4567 * pdev revision 0x20; newer revisions support them. Setting these
4568 * two fields does not cause an error when the driver sends the
4569 * command to the firmware on revision 0x20.
4571 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4572 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4574 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4576 dev_err(&hdev->pdev->dev,
4577 "Set promisc mode fail, status is %d.\n", ret);
4582 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4583 bool en_mc, bool en_bc, int vport_id)
4588 memset(param, 0, sizeof(struct hclge_promisc_param));
4590 param->enable = HCLGE_PROMISC_EN_UC;
4592 param->enable |= HCLGE_PROMISC_EN_MC;
4594 param->enable |= HCLGE_PROMISC_EN_BC;
4595 param->vf_id = vport_id;
4598 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4599 bool en_mc_pmc)
4601 struct hclge_vport *vport = hclge_get_vport(handle);
4602 struct hclge_dev *hdev = vport->back;
4603 struct hclge_promisc_param param;
4604 bool en_bc_pmc = true;
4606 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4607 * is always bypassed. So broadcast promisc should be disabled until
4608 * the user enables promisc mode
4610 if (handle->pdev->revision == 0x20)
4611 en_bc_pmc = handle->netdev_flags & HNAE3_BPE;
4613 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4614 vport->vport_id);
4615 return hclge_cmd_set_promisc_mode(hdev, &param);
4618 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4620 struct hclge_get_fd_mode_cmd *req;
4621 struct hclge_desc desc;
4624 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4626 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4630 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4634 *fd_mode = req->mode;
4639 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4640 u32 *stage1_entry_num,
4641 u32 *stage2_entry_num,
4642 u16 *stage1_counter_num,
4643 u16 *stage2_counter_num)
4645 struct hclge_get_fd_allocation_cmd *req;
4646 struct hclge_desc desc;
4649 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4651 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4653 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4655 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4660 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4661 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4662 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4663 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4668 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4670 struct hclge_set_fd_key_config_cmd *req;
4671 struct hclge_fd_key_cfg *stage;
4672 struct hclge_desc desc;
4675 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4677 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4678 stage = &hdev->fd_cfg.key_cfg[stage_num];
4679 req->stage = stage_num;
4680 req->key_select = stage->key_sel;
4681 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4682 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4683 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4684 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4685 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4686 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4690 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
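/* Note the inversion above: a tuple enabled in tuple_active must be 0 in
 * the mask sent to hardware. E.g. (illustrative) tuple_active = 0x000000ff
 * yields req->tuple_mask = cpu_to_le32(0xffffff00); the same applies to
 * meta_data_mask.
 */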
4695 static int hclge_init_fd_config(struct hclge_dev *hdev)
4697 #define LOW_2_WORDS 0x03
4698 struct hclge_fd_key_cfg *key_cfg;
4701 if (!hnae3_dev_fd_supported(hdev))
4704 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4708 switch (hdev->fd_cfg.fd_mode) {
4709 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4710 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4712 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4713 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4716 dev_err(&hdev->pdev->dev,
4717 "Unsupported flow director mode %d\n",
4718 hdev->fd_cfg.fd_mode);
4722 hdev->fd_cfg.proto_support =
4723 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4724 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4725 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4726 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4727 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4728 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4729 key_cfg->outer_sipv6_word_en = 0;
4730 key_cfg->outer_dipv6_word_en = 0;
4732 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4733 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4734 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4735 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4737 /* If the max 400-bit key is used, we can support tuples for ether type */
4738 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4739 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4740 key_cfg->tuple_active |=
4741 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4744 /* roce_type is used to filter roce frames
4745 * dst_vport is used to specify the rule
4747 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4749 ret = hclge_get_fd_allocation(hdev,
4750 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4751 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4752 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4753 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4757 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4760 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4761 int loc, u8 *key, bool is_add)
4763 struct hclge_fd_tcam_config_1_cmd *req1;
4764 struct hclge_fd_tcam_config_2_cmd *req2;
4765 struct hclge_fd_tcam_config_3_cmd *req3;
4766 struct hclge_desc desc[3];
4769 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4770 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4771 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4772 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4773 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4775 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4776 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4777 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4779 req1->stage = stage;
4780 req1->xy_sel = sel_x ? 1 : 0;
4781 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4782 req1->index = cpu_to_le32(loc);
4783 req1->entry_vld = sel_x ? is_add : 0;
4786 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4787 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4788 sizeof(req2->tcam_data));
4789 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4790 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4793 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4795 dev_err(&hdev->pdev->dev,
4796 "config tcam key fail, ret=%d\n",
4802 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4803 struct hclge_fd_ad_data *action)
4805 struct hclge_fd_ad_config_cmd *req;
4806 struct hclge_desc desc;
4810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4812 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4813 req->index = cpu_to_le32(loc);
4816 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4817 action->write_rule_id_to_bd);
4818 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4821 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4822 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4823 action->forward_to_direct_queue);
4824 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4826 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4827 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4828 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4829 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4830 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4831 action->next_input_key);
4833 req->ad_data = cpu_to_le64(ad_data);
4834 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4836 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4841 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4842 struct hclge_fd_rule *rule)
4844 u16 tmp_x_s, tmp_y_s;
4845 u32 tmp_x_l, tmp_y_l;
4848 if (rule->unused_tuple & tuple_bit)
4851 switch (tuple_bit) {
4854 case BIT(INNER_DST_MAC):
4855 for (i = 0; i < ETH_ALEN; i++) {
4856 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4857 rule->tuples_mask.dst_mac[i]);
4858 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4859 rule->tuples_mask.dst_mac[i]);
4863 case BIT(INNER_SRC_MAC):
4864 for (i = 0; i < ETH_ALEN; i++) {
4865 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4866 rule->tuples_mask.src_mac[i]);
4867 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4868 rule->tuples_mask.src_mac[i]);
4872 case BIT(INNER_VLAN_TAG_FST):
4873 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4874 rule->tuples_mask.vlan_tag1);
4875 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4876 rule->tuples_mask.vlan_tag1);
4877 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4878 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4881 case BIT(INNER_ETH_TYPE):
4882 calc_x(tmp_x_s, rule->tuples.ether_proto,
4883 rule->tuples_mask.ether_proto);
4884 calc_y(tmp_y_s, rule->tuples.ether_proto,
4885 rule->tuples_mask.ether_proto);
4886 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4887 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4890 case BIT(INNER_IP_TOS):
4891 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4892 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4895 case BIT(INNER_IP_PROTO):
4896 calc_x(*key_x, rule->tuples.ip_proto,
4897 rule->tuples_mask.ip_proto);
4898 calc_y(*key_y, rule->tuples.ip_proto,
4899 rule->tuples_mask.ip_proto);
4902 case BIT(INNER_SRC_IP):
4903 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4904 rule->tuples_mask.src_ip[IPV4_INDEX]);
4905 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4906 rule->tuples_mask.src_ip[IPV4_INDEX]);
4907 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4908 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4911 case BIT(INNER_DST_IP):
4912 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4913 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4914 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4915 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4916 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4917 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4920 case BIT(INNER_SRC_PORT):
4921 calc_x(tmp_x_s, rule->tuples.src_port,
4922 rule->tuples_mask.src_port);
4923 calc_y(tmp_y_s, rule->tuples.src_port,
4924 rule->tuples_mask.src_port);
4925 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4926 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4929 case BIT(INNER_DST_PORT):
4930 calc_x(tmp_x_s, rule->tuples.dst_port,
4931 rule->tuples_mask.dst_port);
4932 calc_y(tmp_y_s, rule->tuples.dst_port,
4933 rule->tuples_mask.dst_port);
4934 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4935 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
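/* Sketch of the TCAM x/y encoding produced above, assuming the driver's
 * calc_x()/calc_y() helpers reduce to x = ~data & mask and y = data & mask
 * (an assumption; they are defined elsewhere in this file):
 *
 * data = 0b1010, mask = 0b1100
 * x = ~0b1010 & 0b1100 = 0b0100 -> bit 2 must match 0
 * y = 0b1010 & 0b1100 = 0b1000 -> bit 3 must match 1
 * bits 1..0: x = y = 0 -> don't care (masked out)
 */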
4943 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4944 u8 vf_id, u8 network_port_id)
4946 u32 port_number = 0;
4948 if (port_type == HOST_PORT) {
4949 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, pf_id);
4951 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, vf_id);
4953 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4955 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4956 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4957 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4963 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4964 __le32 *key_x, __le32 *key_y,
4965 struct hclge_fd_rule *rule)
4967 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4968 u8 cur_pos = 0, tuple_size, shift_bits;
4971 for (i = 0; i < MAX_META_DATA; i++) {
4972 tuple_size = meta_data_key_info[i].key_length;
4973 tuple_bit = key_cfg->meta_data_active & BIT(i);
4975 switch (tuple_bit) {
4976 case BIT(ROCE_TYPE):
4977 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4978 cur_pos += tuple_size;
4980 case BIT(DST_VPORT):
4981 port_number = hclge_get_port_number(HOST_PORT, 0, rule->vf_id, 0);
4983 hnae3_set_field(meta_data,
4984 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4985 cur_pos, port_number);
4986 cur_pos += tuple_size;
4993 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4994 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4995 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4997 *key_x = cpu_to_le32(tmp_x << shift_bits);
4998 *key_y = cpu_to_le32(tmp_y << shift_bits);
5001 /* A complete key consists of a meta data key and a tuple key.
5002 * The meta data key is stored in the MSB region, the tuple key in the
5003 * LSB region, and unused bits are filled with 0.
5005 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5006 struct hclge_fd_rule *rule)
5008 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5009 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5010 u8 *cur_key_x, *cur_key_y;
5012 int ret, tuple_size;
5013 u8 meta_data_region;
5015 memset(key_x, 0, sizeof(key_x));
5016 memset(key_y, 0, sizeof(key_y));
5020 for (i = 0; i < MAX_TUPLE; i++) {
5024 tuple_size = tuple_key_info[i].key_length / 8;
5025 check_tuple = key_cfg->tuple_active & BIT(i);
5027 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5030 cur_key_x += tuple_size;
5031 cur_key_y += tuple_size;
5035 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5036 MAX_META_DATA_LENGTH / 8;
5038 hclge_fd_convert_meta_data(key_cfg,
5039 (__le32 *)(key_x + meta_data_region),
5040 (__le32 *)(key_y + meta_data_region),
5043 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5046 dev_err(&hdev->pdev->dev,
5047 "fd key_y config fail, loc=%d, ret=%d\n",
5048 rule->location, ret);
5052 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5055 dev_err(&hdev->pdev->dev,
5056 "fd key_x config fail, loc=%d, ret=%d\n",
5057 rule->location, ret);
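/* Illustrative layout arithmetic for the key built above: with a 400-bit
 * max_key_length (50 bytes) and a hypothetical 32-bit MAX_META_DATA_LENGTH,
 * meta_data_region = 50 - 4 = 46, so bytes 0..45 hold the tuple key (LSB
 * region) and bytes 46..49 hold the meta data key (MSB region).
 */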
5061 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5062 struct hclge_fd_rule *rule)
5064 struct hclge_fd_ad_data ad_data;
5066 ad_data.ad_id = rule->location;
5068 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5069 ad_data.drop_packet = true;
5070 ad_data.forward_to_direct_queue = false;
5071 ad_data.queue_id = 0;
5073 ad_data.drop_packet = false;
5074 ad_data.forward_to_direct_queue = true;
5075 ad_data.queue_id = rule->queue_id;
5078 ad_data.use_counter = false;
5079 ad_data.counter_id = 0;
5081 ad_data.use_next_stage = false;
5082 ad_data.next_input_key = 0;
5084 ad_data.write_rule_id_to_bd = true;
5085 ad_data.rule_id = rule->location;
5087 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5090 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5091 struct ethtool_rx_flow_spec *fs, u32 *unused)
5093 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5094 struct ethtool_usrip4_spec *usr_ip4_spec;
5095 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5096 struct ethtool_usrip6_spec *usr_ip6_spec;
5097 struct ethhdr *ether_spec;
5099 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5102 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5105 if ((fs->flow_type & FLOW_EXT) &&
5106 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5107 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5111 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5115 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5116 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5118 if (!tcp_ip4_spec->ip4src)
5119 *unused |= BIT(INNER_SRC_IP);
5121 if (!tcp_ip4_spec->ip4dst)
5122 *unused |= BIT(INNER_DST_IP);
5124 if (!tcp_ip4_spec->psrc)
5125 *unused |= BIT(INNER_SRC_PORT);
5127 if (!tcp_ip4_spec->pdst)
5128 *unused |= BIT(INNER_DST_PORT);
5130 if (!tcp_ip4_spec->tos)
5131 *unused |= BIT(INNER_IP_TOS);
5135 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5136 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5137 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5139 if (!usr_ip4_spec->ip4src)
5140 *unused |= BIT(INNER_SRC_IP);
5142 if (!usr_ip4_spec->ip4dst)
5143 *unused |= BIT(INNER_DST_IP);
5145 if (!usr_ip4_spec->tos)
5146 *unused |= BIT(INNER_IP_TOS);
5148 if (!usr_ip4_spec->proto)
5149 *unused |= BIT(INNER_IP_PROTO);
5151 if (usr_ip4_spec->l4_4_bytes)
5154 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5161 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5162 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5165 /* check whether the src/dst ip address is used */
5166 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5167 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5168 *unused |= BIT(INNER_SRC_IP);
5170 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5171 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5172 *unused |= BIT(INNER_DST_IP);
5174 if (!tcp_ip6_spec->psrc)
5175 *unused |= BIT(INNER_SRC_PORT);
5177 if (!tcp_ip6_spec->pdst)
5178 *unused |= BIT(INNER_DST_PORT);
5180 if (tcp_ip6_spec->tclass)
5184 case IPV6_USER_FLOW:
5185 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5186 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5187 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5188 BIT(INNER_DST_PORT);
5190 /* check whether the src/dst ip address is used */
5191 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5192 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5193 *unused |= BIT(INNER_SRC_IP);
5195 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5196 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5197 *unused |= BIT(INNER_DST_IP);
5199 if (!usr_ip6_spec->l4_proto)
5200 *unused |= BIT(INNER_IP_PROTO);
5202 if (usr_ip6_spec->tclass)
5205 if (usr_ip6_spec->l4_4_bytes)
5210 ether_spec = &fs->h_u.ether_spec;
5211 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5212 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5213 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5215 if (is_zero_ether_addr(ether_spec->h_source))
5216 *unused |= BIT(INNER_SRC_MAC);
5218 if (is_zero_ether_addr(ether_spec->h_dest))
5219 *unused |= BIT(INNER_DST_MAC);
5221 if (!ether_spec->h_proto)
5222 *unused |= BIT(INNER_ETH_TYPE);
5229 if (fs->flow_type & FLOW_EXT) {
5230 if (fs->h_ext.vlan_etype)
5232 if (!fs->h_ext.vlan_tci)
5233 *unused |= BIT(INNER_VLAN_TAG_FST);
5235 if (fs->m_ext.vlan_tci) {
5236 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5240 *unused |= BIT(INNER_VLAN_TAG_FST);
5243 if (fs->flow_type & FLOW_MAC_EXT) {
5244 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5247 if (is_zero_ether_addr(fs->h_ext.h_dest))
5248 *unused |= BIT(INNER_DST_MAC);
5250 *unused &= ~(BIT(INNER_DST_MAC));
5256 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5258 struct hclge_fd_rule *rule = NULL;
5259 struct hlist_node *node2;
5261 spin_lock_bh(&hdev->fd_rule_lock);
5262 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5263 if (rule->location >= location)
5267 spin_unlock_bh(&hdev->fd_rule_lock);
5269 return rule && rule->location == location;
5272 /* the caller must hold fd_rule_lock before calling this function */
5273 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5274 struct hclge_fd_rule *new_rule,
5278 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5279 struct hlist_node *node2;
5281 if (is_add && !new_rule)
5284 hlist_for_each_entry_safe(rule, node2,
5285 &hdev->fd_rule_list, rule_node) {
5286 if (rule->location >= location)
5291 if (rule && rule->location == location) {
5292 hlist_del(&rule->rule_node);
5294 hdev->hclge_fd_rule_num--;
5297 if (!hdev->hclge_fd_rule_num)
5298 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5299 clear_bit(location, hdev->fd_bmap);
5303 } else if (!is_add) {
5304 dev_err(&hdev->pdev->dev,
5305 "delete fail, rule %d is inexistent\n",
5310 INIT_HLIST_NODE(&new_rule->rule_node);
5313 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5315 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5317 set_bit(location, hdev->fd_bmap);
5318 hdev->hclge_fd_rule_num++;
5319 hdev->fd_active_type = new_rule->rule_type;
5324 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5325 struct ethtool_rx_flow_spec *fs,
5326 struct hclge_fd_rule *rule)
5328 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5330 switch (flow_type) {
5334 rule->tuples.src_ip[IPV4_INDEX] =
5335 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5336 rule->tuples_mask.src_ip[IPV4_INDEX] =
5337 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5339 rule->tuples.dst_ip[IPV4_INDEX] =
5340 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5341 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5342 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5344 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5345 rule->tuples_mask.src_port =
5346 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5348 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5349 rule->tuples_mask.dst_port =
5350 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5352 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5353 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5355 rule->tuples.ether_proto = ETH_P_IP;
5356 rule->tuples_mask.ether_proto = 0xFFFF;
5360 rule->tuples.src_ip[IPV4_INDEX] =
5361 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5362 rule->tuples_mask.src_ip[IPV4_INDEX] =
5363 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5365 rule->tuples.dst_ip[IPV4_INDEX] =
5366 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5367 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5368 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5370 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5371 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5373 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5374 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5376 rule->tuples.ether_proto = ETH_P_IP;
5377 rule->tuples_mask.ether_proto = 0xFFFF;
5383 be32_to_cpu_array(rule->tuples.src_ip,
5384 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5385 be32_to_cpu_array(rule->tuples_mask.src_ip,
5386 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5388 be32_to_cpu_array(rule->tuples.dst_ip,
5389 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5390 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5391 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5393 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5394 rule->tuples_mask.src_port =
5395 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5397 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5398 rule->tuples_mask.dst_port =
5399 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5401 rule->tuples.ether_proto = ETH_P_IPV6;
5402 rule->tuples_mask.ether_proto = 0xFFFF;
5405 case IPV6_USER_FLOW:
5406 be32_to_cpu_array(rule->tuples.src_ip,
5407 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5408 be32_to_cpu_array(rule->tuples_mask.src_ip,
5409 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5411 be32_to_cpu_array(rule->tuples.dst_ip,
5412 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5413 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5414 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5416 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5417 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5419 rule->tuples.ether_proto = ETH_P_IPV6;
5420 rule->tuples_mask.ether_proto = 0xFFFF;
5424 ether_addr_copy(rule->tuples.src_mac,
5425 fs->h_u.ether_spec.h_source);
5426 ether_addr_copy(rule->tuples_mask.src_mac,
5427 fs->m_u.ether_spec.h_source);
5429 ether_addr_copy(rule->tuples.dst_mac,
5430 fs->h_u.ether_spec.h_dest);
5431 ether_addr_copy(rule->tuples_mask.dst_mac,
5432 fs->m_u.ether_spec.h_dest);
5434 rule->tuples.ether_proto =
5435 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5436 rule->tuples_mask.ether_proto =
5437 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5444 switch (flow_type) {
5447 rule->tuples.ip_proto = IPPROTO_SCTP;
5448 rule->tuples_mask.ip_proto = 0xFF;
5452 rule->tuples.ip_proto = IPPROTO_TCP;
5453 rule->tuples_mask.ip_proto = 0xFF;
5457 rule->tuples.ip_proto = IPPROTO_UDP;
5458 rule->tuples_mask.ip_proto = 0xFF;
5464 if (fs->flow_type & FLOW_EXT) {
5465 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5466 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5469 if (fs->flow_type & FLOW_MAC_EXT) {
5470 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5471 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5477 /* the caller must hold fd_rule_lock before calling this function */
5478 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5479 struct hclge_fd_rule *rule)
5484 dev_err(&hdev->pdev->dev,
5485 "The flow director rule is NULL\n");
5489 /* this never fails here, so there is no need to check the return value */
5490 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5492 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5496 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5503 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5507 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5508 struct ethtool_rxnfc *cmd)
5510 struct hclge_vport *vport = hclge_get_vport(handle);
5511 struct hclge_dev *hdev = vport->back;
5512 u16 dst_vport_id = 0, q_index = 0;
5513 struct ethtool_rx_flow_spec *fs;
5514 struct hclge_fd_rule *rule;
5519 if (!hnae3_dev_fd_supported(hdev))
5523 dev_warn(&hdev->pdev->dev,
5524 "Please enable flow director first\n");
5528 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5530 ret = hclge_fd_check_spec(hdev, fs, &unused);
5532 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5536 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5537 action = HCLGE_FD_ACTION_DROP_PACKET;
5539 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5540 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5543 if (vf > hdev->num_req_vfs) {
5544 dev_err(&hdev->pdev->dev,
5545 "Error: vf id (%d) > max vf num (%d)\n",
5546 vf, hdev->num_req_vfs);
5550 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5551 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5554 dev_err(&hdev->pdev->dev,
5555 "Error: queue id (%d) > max tqp num (%d)\n",
5560 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5564 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5568 ret = hclge_fd_get_tuple(hdev, fs, rule);
5574 rule->flow_type = fs->flow_type;
5576 rule->location = fs->location;
5577 rule->unused_tuple = unused;
5578 rule->vf_id = dst_vport_id;
5579 rule->queue_id = q_index;
5580 rule->action = action;
5581 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5583 /* to avoid rule conflict, when the user configures rules via ethtool,
5584 * we need to clear all arfs rules
5586 hclge_clear_arfs_rules(handle);
5588 spin_lock_bh(&hdev->fd_rule_lock);
5589 ret = hclge_fd_config_rule(hdev, rule);
5591 spin_unlock_bh(&hdev->fd_rule_lock);
5596 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5597 struct ethtool_rxnfc *cmd)
5599 struct hclge_vport *vport = hclge_get_vport(handle);
5600 struct hclge_dev *hdev = vport->back;
5601 struct ethtool_rx_flow_spec *fs;
5604 if (!hnae3_dev_fd_supported(hdev))
5607 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5609 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5612 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5613 dev_err(&hdev->pdev->dev,
5614 "Delete fail, rule %d is inexistent\n", fs->location);
5618 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5619 NULL, false);
5623 spin_lock_bh(&hdev->fd_rule_lock);
5624 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5626 spin_unlock_bh(&hdev->fd_rule_lock);
5631 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5634 struct hclge_vport *vport = hclge_get_vport(handle);
5635 struct hclge_dev *hdev = vport->back;
5636 struct hclge_fd_rule *rule;
5637 struct hlist_node *node;
5640 if (!hnae3_dev_fd_supported(hdev))
5643 spin_lock_bh(&hdev->fd_rule_lock);
5644 for_each_set_bit(location, hdev->fd_bmap,
5645 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5646 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5647 NULL, false);
5650 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5652 hlist_del(&rule->rule_node);
5655 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5656 hdev->hclge_fd_rule_num = 0;
5657 bitmap_zero(hdev->fd_bmap,
5658 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5661 spin_unlock_bh(&hdev->fd_rule_lock);
5664 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5666 struct hclge_vport *vport = hclge_get_vport(handle);
5667 struct hclge_dev *hdev = vport->back;
5668 struct hclge_fd_rule *rule;
5669 struct hlist_node *node;
5672 /* Return ok here, because reset error handling will check this
5673 * return value. If an error is returned here, the reset process will
5674 * fail.
5676 if (!hnae3_dev_fd_supported(hdev))
5679 /* if fd is disabled, it should not be restored during reset */
5683 spin_lock_bh(&hdev->fd_rule_lock);
5684 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5685 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5687 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5690 dev_warn(&hdev->pdev->dev,
5691 "Restore rule %d failed, remove it\n",
5693 clear_bit(rule->location, hdev->fd_bmap);
5694 hlist_del(&rule->rule_node);
5696 hdev->hclge_fd_rule_num--;
5700 if (hdev->hclge_fd_rule_num)
5701 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5703 spin_unlock_bh(&hdev->fd_rule_lock);
5708 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5709 struct ethtool_rxnfc *cmd)
5711 struct hclge_vport *vport = hclge_get_vport(handle);
5712 struct hclge_dev *hdev = vport->back;
5714 if (!hnae3_dev_fd_supported(hdev))
5717 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5718 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5723 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5724 struct ethtool_rxnfc *cmd)
5726 struct hclge_vport *vport = hclge_get_vport(handle);
5727 struct hclge_fd_rule *rule = NULL;
5728 struct hclge_dev *hdev = vport->back;
5729 struct ethtool_rx_flow_spec *fs;
5730 struct hlist_node *node2;
5732 if (!hnae3_dev_fd_supported(hdev))
5735 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5737 spin_lock_bh(&hdev->fd_rule_lock);
5739 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5740 if (rule->location >= fs->location)
5744 if (!rule || fs->location != rule->location) {
5745 spin_unlock_bh(&hdev->fd_rule_lock);
5750 fs->flow_type = rule->flow_type;
5751 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5755 fs->h_u.tcp_ip4_spec.ip4src =
5756 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5757 fs->m_u.tcp_ip4_spec.ip4src =
5758 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5759 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5761 fs->h_u.tcp_ip4_spec.ip4dst =
5762 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5763 fs->m_u.tcp_ip4_spec.ip4dst =
5764 rule->unused_tuple & BIT(INNER_DST_IP) ?
5765 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5767 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5768 fs->m_u.tcp_ip4_spec.psrc =
5769 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5770 0 : cpu_to_be16(rule->tuples_mask.src_port);
5772 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5773 fs->m_u.tcp_ip4_spec.pdst =
5774 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5775 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5777 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5778 fs->m_u.tcp_ip4_spec.tos =
5779 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5780 0 : rule->tuples_mask.ip_tos;
5784 fs->h_u.usr_ip4_spec.ip4src =
5785 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5786 fs->m_u.usr_ip4_spec.ip4src =
5787 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5788 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5790 fs->h_u.usr_ip4_spec.ip4dst =
5791 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5792 fs->m_u.usr_ip4_spec.ip4dst =
5793 rule->unused_tuple & BIT(INNER_DST_IP) ?
5794 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5796 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5797 fs->m_u.usr_ip4_spec.tos =
5798 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5799 0 : rule->tuples_mask.ip_tos;
5801 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5802 fs->m_u.usr_ip4_spec.proto =
5803 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5804 0 : rule->tuples_mask.ip_proto;
5806 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5812 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5813 rule->tuples.src_ip, IPV6_SIZE);
5814 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5815 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5816 sizeof(int) * IPV6_SIZE);
5818 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5819 rule->tuples_mask.src_ip, IPV6_SIZE);
5821 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5822 rule->tuples.dst_ip, IPV6_SIZE);
5823 if (rule->unused_tuple & BIT(INNER_DST_IP))
5824 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5825 sizeof(int) * IPV6_SIZE);
5827 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5828 rule->tuples_mask.dst_ip, IPV6_SIZE);
5830 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5831 fs->m_u.tcp_ip6_spec.psrc =
5832 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5833 0 : cpu_to_be16(rule->tuples_mask.src_port);
5835 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5836 fs->m_u.tcp_ip6_spec.pdst =
5837 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5838 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5841 case IPV6_USER_FLOW:
5842 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5843 rule->tuples.src_ip, IPV6_SIZE);
5844 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5845 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5846 sizeof(int) * IPV6_SIZE);
5848 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5849 rule->tuples_mask.src_ip, IPV6_SIZE);
5851 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5852 rule->tuples.dst_ip, IPV6_SIZE);
5853 if (rule->unused_tuple & BIT(INNER_DST_IP))
5854 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5855 sizeof(int) * IPV6_SIZE);
5857 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5858 rule->tuples_mask.dst_ip, IPV6_SIZE);
5860 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5861 fs->m_u.usr_ip6_spec.l4_proto =
5862 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5863 0 : rule->tuples_mask.ip_proto;
5867 ether_addr_copy(fs->h_u.ether_spec.h_source,
5868 rule->tuples.src_mac);
5869 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5870 eth_zero_addr(fs->m_u.ether_spec.h_source);
5872 ether_addr_copy(fs->m_u.ether_spec.h_source,
5873 rule->tuples_mask.src_mac);
5875 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5876 rule->tuples.dst_mac);
5877 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5878 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5880 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5881 rule->tuples_mask.dst_mac);
5883 fs->h_u.ether_spec.h_proto =
5884 cpu_to_be16(rule->tuples.ether_proto);
5885 fs->m_u.ether_spec.h_proto =
5886 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5887 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5891 spin_unlock_bh(&hdev->fd_rule_lock);
5895 if (fs->flow_type & FLOW_EXT) {
5896 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5897 fs->m_ext.vlan_tci =
5898 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5899 cpu_to_be16(VLAN_VID_MASK) :
5900 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5903 if (fs->flow_type & FLOW_MAC_EXT) {
5904 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5905 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5906 eth_zero_addr(fs->m_ext.h_dest);
5908 ether_addr_copy(fs->m_ext.h_dest,
5909 rule->tuples_mask.dst_mac);
5912 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5913 fs->ring_cookie = RX_CLS_FLOW_DISC;
5917 fs->ring_cookie = rule->queue_id;
5918 vf_id = rule->vf_id;
5919 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5920 fs->ring_cookie |= vf_id;
5923 spin_unlock_bh(&hdev->fd_rule_lock);
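/* Worked example of the ring_cookie encoding above: with vf_id = 2 and
 * queue_id = 5, fs->ring_cookie becomes
 * (2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5, i.e. the VF index lives
 * in the upper 32 bits and the queue in the lower 32 bits
 * (ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF is 32).
 */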
5928 static int hclge_get_all_rules(struct hnae3_handle *handle,
5929 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5931 struct hclge_vport *vport = hclge_get_vport(handle);
5932 struct hclge_dev *hdev = vport->back;
5933 struct hclge_fd_rule *rule;
5934 struct hlist_node *node2;
5937 if (!hnae3_dev_fd_supported(hdev))
5940 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5942 spin_lock_bh(&hdev->fd_rule_lock);
5943 hlist_for_each_entry_safe(rule, node2,
5944 &hdev->fd_rule_list, rule_node) {
5945 if (cnt == cmd->rule_cnt) {
5946 spin_unlock_bh(&hdev->fd_rule_lock);
5950 rule_locs[cnt] = rule->location;
5954 spin_unlock_bh(&hdev->fd_rule_lock);
5956 cmd->rule_cnt = cnt;
5961 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5962 struct hclge_fd_rule_tuples *tuples)
5964 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5965 tuples->ip_proto = fkeys->basic.ip_proto;
5966 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5968 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5969 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5970 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5972 memcpy(tuples->src_ip,
5973 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5974 sizeof(tuples->src_ip));
5975 memcpy(tuples->dst_ip,
5976 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5977 sizeof(tuples->dst_ip));
5981 /* traverse all rules, checking whether an existing rule has the same tuples */
5982 static struct hclge_fd_rule *
5983 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5984 const struct hclge_fd_rule_tuples *tuples)
5986 struct hclge_fd_rule *rule = NULL;
5987 struct hlist_node *node;
5989 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5990 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5997 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5998 struct hclge_fd_rule *rule)
6000 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6001 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6002 BIT(INNER_SRC_PORT);
6005 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6006 if (tuples->ether_proto == ETH_P_IP) {
6007 if (tuples->ip_proto == IPPROTO_TCP)
6008 rule->flow_type = TCP_V4_FLOW;
6010 rule->flow_type = UDP_V4_FLOW;
6012 if (tuples->ip_proto == IPPROTO_TCP)
6013 rule->flow_type = TCP_V6_FLOW;
6015 rule->flow_type = UDP_V6_FLOW;
6017 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6018 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6021 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6022 u16 flow_id, struct flow_keys *fkeys)
6024 struct hclge_vport *vport = hclge_get_vport(handle);
6025 struct hclge_fd_rule_tuples new_tuples;
6026 struct hclge_dev *hdev = vport->back;
6027 struct hclge_fd_rule *rule;
6032 if (!hnae3_dev_fd_supported(hdev))
6035 memset(&new_tuples, 0, sizeof(new_tuples));
6036 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6038 spin_lock_bh(&hdev->fd_rule_lock);
6040 /* when an fd rule added by the user already exists,
6041 * arfs should not work
6043 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6044 spin_unlock_bh(&hdev->fd_rule_lock);
6049 /* check whether a flow director filter exists for this flow; if not,
6050 * create a new filter for it;
6051 * if a filter exists with a different queue id, modify the filter;
6052 * if a filter exists with the same queue id, do nothing
6054 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6056 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6057 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6058 spin_unlock_bh(&hdev->fd_rule_lock);
6063 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6065 spin_unlock_bh(&hdev->fd_rule_lock);
6070 set_bit(bit_id, hdev->fd_bmap);
6071 rule->location = bit_id;
6072 rule->flow_id = flow_id;
6073 rule->queue_id = queue_id;
6074 hclge_fd_build_arfs_rule(&new_tuples, rule);
6075 ret = hclge_fd_config_rule(hdev, rule);
6077 spin_unlock_bh(&hdev->fd_rule_lock);
6082 return rule->location;
6085 spin_unlock_bh(&hdev->fd_rule_lock);
6087 if (rule->queue_id == queue_id)
6088 return rule->location;
6090 tmp_queue_id = rule->queue_id;
6091 rule->queue_id = queue_id;
6092 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6094 rule->queue_id = tmp_queue_id;
6098 return rule->location;
6101 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6103 #ifdef CONFIG_RFS_ACCEL
6104 struct hnae3_handle *handle = &hdev->vport[0].nic;
6105 struct hclge_fd_rule *rule;
6106 struct hlist_node *node;
6107 HLIST_HEAD(del_list);
6109 spin_lock_bh(&hdev->fd_rule_lock);
6110 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6111 spin_unlock_bh(&hdev->fd_rule_lock);
6114 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6115 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6116 rule->flow_id, rule->location)) {
6117 hlist_del_init(&rule->rule_node);
6118 hlist_add_head(&rule->rule_node, &del_list);
6119 hdev->hclge_fd_rule_num--;
6120 clear_bit(rule->location, hdev->fd_bmap);
6123 spin_unlock_bh(&hdev->fd_rule_lock);
6125 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6126 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6127 rule->location, NULL, false);
6133 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6135 #ifdef CONFIG_RFS_ACCEL
6136 struct hclge_vport *vport = hclge_get_vport(handle);
6137 struct hclge_dev *hdev = vport->back;
6139 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6140 hclge_del_all_fd_entries(handle, true);
6144 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6146 struct hclge_vport *vport = hclge_get_vport(handle);
6147 struct hclge_dev *hdev = vport->back;
6149 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6150 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6153 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6155 struct hclge_vport *vport = hclge_get_vport(handle);
6156 struct hclge_dev *hdev = vport->back;
6158 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6161 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6163 struct hclge_vport *vport = hclge_get_vport(handle);
6164 struct hclge_dev *hdev = vport->back;
6166 return hdev->rst_stats.hw_reset_done_cnt;
6169 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6171 struct hclge_vport *vport = hclge_get_vport(handle);
6172 struct hclge_dev *hdev = vport->back;
6175 hdev->fd_en = enable;
6176 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6178 hclge_del_all_fd_entries(handle, clear);
6180 hclge_restore_fd_entries(handle);
6183 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6185 struct hclge_desc desc;
6186 struct hclge_config_mac_mode_cmd *req =
6187 (struct hclge_config_mac_mode_cmd *)desc.data;
6191 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6194 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6195 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6196 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6197 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6198 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6199 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6200 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6201 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6202 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6203 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6206 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6208 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6210 dev_err(&hdev->pdev->dev,
6211 "mac enable fail, ret =%d.\n", ret);
6214 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6215 u8 switch_param, u8 param_mask)
6217 struct hclge_mac_vlan_switch_cmd *req;
6218 struct hclge_desc desc;
6222 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6223 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6224 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6226 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6227 req->func_id = cpu_to_le32(func_id);
6228 req->switch_param = switch_param;
6229 req->param_mask = param_mask;
6231 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6233 dev_err(&hdev->pdev->dev,
6234 "set mac vlan switch parameter fail, ret = %d\n", ret);
6238 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6241 #define HCLGE_PHY_LINK_STATUS_NUM 200
6243 struct phy_device *phydev = hdev->hw.mac.phydev;
6248 ret = phy_read_status(phydev);
6250 dev_err(&hdev->pdev->dev,
6251 "phy update link status fail, ret = %d\n", ret);
6255 if (phydev->link == link_ret)
6258 msleep(HCLGE_LINK_STATUS_MS);
6259 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
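/* Polling budget arithmetic for the wait above: up to
 * HCLGE_PHY_LINK_STATUS_NUM (200) iterations with 10 ms
 * (HCLGE_LINK_STATUS_MS) sleeps, i.e. roughly 2 seconds before giving up;
 * the MAC variant below polls 100 times for roughly 1 second.
 */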
6262 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6264 #define HCLGE_MAC_LINK_STATUS_NUM 100
6270 ret = hclge_get_mac_link_status(hdev);
6273 else if (ret == link_ret)
6276 msleep(HCLGE_LINK_STATUS_MS);
6277 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6281 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6284 #define HCLGE_LINK_STATUS_DOWN 0
6285 #define HCLGE_LINK_STATUS_UP 1
6289 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6292 hclge_phy_link_status_wait(hdev, link_ret);
6294 return hclge_mac_link_status_wait(hdev, link_ret);
6297 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6299 struct hclge_config_mac_mode_cmd *req;
6300 struct hclge_desc desc;
6304 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6305 /* 1 Read out the MAC mode config first */
6306 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6307 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6309 dev_err(&hdev->pdev->dev,
6310 "mac loopback get fail, ret =%d.\n", ret);
6314 /* 2 Then setup the loopback flag */
6315 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6316 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6317 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6318 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6320 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6322 /* 3 Config mac work mode with the loopback flag
6323 * and its original configuration parameters
6325 hclge_cmd_reuse_desc(&desc, false);
6326 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6328 dev_err(&hdev->pdev->dev,
6329 "mac loopback set fail, ret =%d.\n", ret);
6333 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6334 enum hnae3_loop loop_mode)
6336 #define HCLGE_SERDES_RETRY_MS 10
6337 #define HCLGE_SERDES_RETRY_NUM 100
6339 struct hclge_serdes_lb_cmd *req;
6340 struct hclge_desc desc;
6344 req = (struct hclge_serdes_lb_cmd *)desc.data;
6345 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6347 switch (loop_mode) {
6348 case HNAE3_LOOP_SERIAL_SERDES:
6349 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6351 case HNAE3_LOOP_PARALLEL_SERDES:
6352 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6355 dev_err(&hdev->pdev->dev,
6356 "unsupported serdes loopback mode %d\n", loop_mode);
6360 if (en) {
6361 req->enable = loop_mode_b;
6362 req->mask = loop_mode_b;
6363 } else {
6364 req->mask = loop_mode_b;
6367 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6369 dev_err(&hdev->pdev->dev,
6370 "serdes loopback set fail, ret = %d\n", ret);
6375 msleep(HCLGE_SERDES_RETRY_MS);
6376 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6378 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6380 dev_err(&hdev->pdev->dev,
6381 "serdes loopback get, ret = %d\n", ret);
6384 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6385 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6387 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6388 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6390 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6391 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6395 hclge_cfg_mac_mode(hdev, en);
6397 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6399 dev_err(&hdev->pdev->dev,
6400 "serdes loopback config mac mode timeout\n");
6405 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6406 struct phy_device *phydev)
6410 if (!phydev->suspended) {
6411 ret = phy_suspend(phydev);
6416 ret = phy_resume(phydev);
6420 return phy_loopback(phydev, true);
6423 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6424 struct phy_device *phydev)
6428 ret = phy_loopback(phydev, false);
6432 return phy_suspend(phydev);
6435 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6437 struct phy_device *phydev = hdev->hw.mac.phydev;
6444 ret = hclge_enable_phy_loopback(hdev, phydev);
6446 ret = hclge_disable_phy_loopback(hdev, phydev);
6448 dev_err(&hdev->pdev->dev,
6449 "set phy loopback fail, ret = %d\n", ret);
6453 hclge_cfg_mac_mode(hdev, en);
6455 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6457 dev_err(&hdev->pdev->dev,
6458 "phy loopback config mac mode timeout\n");
6463 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6464 int stream_id, bool enable)
6466 struct hclge_desc desc;
6467 struct hclge_cfg_com_tqp_queue_cmd *req =
6468 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6471 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6472 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6473 req->stream_id = cpu_to_le16(stream_id);
6475 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6477 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6479 dev_err(&hdev->pdev->dev,
6480 "Tqp enable fail, status =%d.\n", ret);
6484 static int hclge_set_loopback(struct hnae3_handle *handle,
6485 enum hnae3_loop loop_mode, bool en)
6487 struct hclge_vport *vport = hclge_get_vport(handle);
6488 struct hnae3_knic_private_info *kinfo;
6489 struct hclge_dev *hdev = vport->back;
6492 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6493 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6494 * the same, the packets are looped back in the SSU. If SSU loopback
6495 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6497 if (hdev->pdev->revision >= 0x21) {
6498 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6500 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6501 HCLGE_SWITCH_ALW_LPBK_MASK);
6506 switch (loop_mode) {
6507 case HNAE3_LOOP_APP:
6508 ret = hclge_set_app_loopback(hdev, en);
6510 case HNAE3_LOOP_SERIAL_SERDES:
6511 case HNAE3_LOOP_PARALLEL_SERDES:
6512 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6514 case HNAE3_LOOP_PHY:
6515 ret = hclge_set_phy_loopback(hdev, en);
6519 dev_err(&hdev->pdev->dev,
6520 "loop_mode %d is not supported\n", loop_mode);
6527 kinfo = &vport->nic.kinfo;
6528 for (i = 0; i < kinfo->num_tqps; i++) {
6529 ret = hclge_tqp_enable(hdev, i, 0, en);
6537 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6539 struct hclge_vport *vport = hclge_get_vport(handle);
6540 struct hnae3_knic_private_info *kinfo;
6541 struct hnae3_queue *queue;
6542 struct hclge_tqp *tqp;
6545 kinfo = &vport->nic.kinfo;
6546 for (i = 0; i < kinfo->num_tqps; i++) {
6547 queue = handle->kinfo.tqp[i];
6548 tqp = container_of(queue, struct hclge_tqp, q);
6549 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6553 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6555 struct hclge_vport *vport = hclge_get_vport(handle);
6556 struct hclge_dev *hdev = vport->back;
6559 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6561 /* Set the DOWN flag here to disable the service to be
6564 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6565 cancel_delayed_work_sync(&hdev->service_task);
6566 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6570 static int hclge_ae_start(struct hnae3_handle *handle)
6572 struct hclge_vport *vport = hclge_get_vport(handle);
6573 struct hclge_dev *hdev = vport->back;
6576 hclge_cfg_mac_mode(hdev, true);
6577 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6578 hdev->hw.mac.link = 0;
6580 /* reset tqp stats */
6581 hclge_reset_tqp_stats(handle);
6583 hclge_mac_start_phy(hdev);
6588 static void hclge_ae_stop(struct hnae3_handle *handle)
6590 struct hclge_vport *vport = hclge_get_vport(handle);
6591 struct hclge_dev *hdev = vport->back;
6594 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6596 hclge_clear_arfs_rules(handle);
6598 /* If it is not a PF reset, the firmware will disable the MAC,
6599 * so it only needs to stop the phy here.
6601 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6602 hdev->reset_type != HNAE3_FUNC_RESET) {
6603 hclge_mac_stop_phy(hdev);
6604 hclge_update_link_status(hdev);
6608 for (i = 0; i < handle->kinfo.num_tqps; i++)
6609 hclge_reset_tqp(handle, i);
6611 hclge_config_mac_tnl_int(hdev, false);
6614 hclge_cfg_mac_mode(hdev, false);
6616 hclge_mac_stop_phy(hdev);
6618 /* reset tqp stats */
6619 hclge_reset_tqp_stats(handle);
6620 hclge_update_link_status(hdev);
6623 int hclge_vport_start(struct hclge_vport *vport)
6625 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6626 vport->last_active_jiffies = jiffies;
6630 void hclge_vport_stop(struct hclge_vport *vport)
6632 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6635 static int hclge_client_start(struct hnae3_handle *handle)
6637 struct hclge_vport *vport = hclge_get_vport(handle);
6639 return hclge_vport_start(vport);
6642 static void hclge_client_stop(struct hnae3_handle *handle)
6644 struct hclge_vport *vport = hclge_get_vport(handle);
6646 hclge_vport_stop(vport);
6649 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6650 u16 cmdq_resp, u8 resp_code,
6651 enum hclge_mac_vlan_tbl_opcode op)
6653 struct hclge_dev *hdev = vport->back;
6656 dev_err(&hdev->pdev->dev,
6657 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6662 if (op == HCLGE_MAC_VLAN_ADD) {
6663 if (!resp_code || resp_code == 1) {
6665 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6666 dev_err(&hdev->pdev->dev,
6667 "add mac addr failed for uc_overflow.\n");
6669 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6670 dev_err(&hdev->pdev->dev,
6671 "add mac addr failed for mc_overflow.\n");
6675 dev_err(&hdev->pdev->dev,
6676 "add mac addr failed for undefined, code=%u.\n",
6679 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6682 } else if (resp_code == 1) {
6683 dev_dbg(&hdev->pdev->dev,
6684 "remove mac addr failed for miss.\n");
6688 dev_err(&hdev->pdev->dev,
6689 "remove mac addr failed for undefined, code=%u.\n",
6692 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6695 } else if (resp_code == 1) {
6696 dev_dbg(&hdev->pdev->dev,
6697 "lookup mac addr failed for miss.\n");
6701 dev_err(&hdev->pdev->dev,
6702 "lookup mac addr failed for undefined, code=%u.\n",
6707 dev_err(&hdev->pdev->dev,
6708 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
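/* A worked example of the VF-bitmap layout handled above (illustrative
 * numbers, not taken from the original source): function ids 0..191 live
 * in desc[1] and ids 192..255 in desc[2], 32 ids per u32 word. So vfid 50
 * maps to desc[1].data[1] bit 18 (50 / 32 = 1, 50 % 32 = 18), while
 * vfid 200 maps to desc[2].data[0] bit 8 ((200 - 192) / 32 = 0 and
 * 200 % 32 = 8).
 */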
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
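/* A sketch of the byte packing done above, with an illustrative address
 * (not from the original source): for addr = 00:11:22:33:44:55,
 * high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100 and
 * low_val = 0x44 | 0x55 << 8 = 0x5544, i.e. the six address bytes end up
 * in little-endian order within mac_addr_hi32 and mac_addr_lo16.
 */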
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;

	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * preserve some unicast mac vlan table entries shared by pf
	 * and its vfs; the remainder is kept as extra shared space
	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}
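/* A worked sizing example for the split above (illustrative numbers, not
 * from the original source): with max_umv_size = 3072 and num_req_vfs = 6,
 * the pool is divided among num_req_vfs + 2 = 8 functions, so each gets
 * priv_umv_size = 3072 / 8 = 384 private entries, and share_umv_size
 * starts at 384 + 3072 % 8 = 384 entries usable by any function once its
 * private quota is exhausted.
 */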
static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	if (!is_alloc)
		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}
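/* Accounting sketch for the above (a reading of the code, not original
 * commentary): a vport first consumes its private quota; entries beyond
 * priv_umv_size are backed by the shared pool, so share_umv_size is only
 * decremented on add once the private entries are used up, and is
 * credited back on free in the mirror order.
 */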
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfids are zero, update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in mta table, but it cannot be
		 * deleted here because an entry of mta represents an address
		 * range rather than a specific address. the delete action to
		 * all entries will take effect in update_mta_status called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
		return -EINVAL;
	}

	if ((!is_first || is_kdump_kernel()) &&
	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	/* if vf vlan table is full, firmware will close vf vlan filter, it
	 * is unable and unnecessary to add new vlan id to vf vlan filter
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
		return 0;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
		if (!req0->resp_code)
			return 0;

		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, avoid massive verbose
		 * print logs when unload.
		 */
		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
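/* Note on the vport_num counting above (a reading of the code, not
 * original commentary): hdev->vlan_table[vlan_id] tracks which vports
 * reference a VLAN, so the port-level filter is only touched at the
 * edges of that reference count - when the first vport adds the VLAN
 * (vport_num == 1 after set) or the last one removes it (vport_num == 0
 * after clear).
 */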
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them,
	 * but these two fields cannot be configured by user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
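/* Behaviour summary for the two configs above (a reading of the code, not
 * original commentary): with port based VLAN disabled, TX accepts packets
 * already carrying tag1 and inserts nothing, and on RX only tag2
 * stripping follows the user's rx-vlan-offload setting. With it enabled,
 * TX inserts default_tag1 instead of accepting a user tag1, RX always
 * strips tag2, and tag1 stripping follows rx_vlan_offload_en.
 */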
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE 0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
static void hclge_restore_vlan_table(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	u16 vlan_proto, vlan_id;
	u16 state;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						 vport->vport_id, vlan_id,
						 false);
			continue;
		}

		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			if (vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan->vlan_id,
							 false);
		}
	}

	mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}
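/* Transition table implemented above (old state x requested vlan ->
 * result): DISABLE + vlan 0 -> NOCHANGE, DISABLE + nonzero -> ENABLE,
 * ENABLE + vlan 0 -> DISABLE, ENABLE + same tag -> NOCHANGE, and
 * ENABLE + different tag -> MODIFY.
 */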
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3 bits value, so it cannot be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							(u8)vfid, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When device is resetting, firmware is unable to handle
	 * mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan enabled, we use port base vlan as the vlan
	 * filter entry. In this case, we don't update vlan filter table
	 * when user adds a new vlan or removes an existing vlan, just update
	 * the vport vlan list. The vlan ids in the vlan list will be written
	 * to the vlan filter table once port base vlan is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	} else if (is_kill) {
		/* when removing a hw vlan filter failed, record the vlan id
		 * and try to remove it from hw later, to stay consistent
		 * with the stack
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}
	return ret;
}
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT 60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* start from vport 1 for PF is always alive */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
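/* Frame-size arithmetic used above, with an everyday value (illustrative,
 * not from the original source): for new_mtu = 1500, max_frm_size =
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (8) = 1526
 * bytes, i.e. the MTU plus the Ethernet header, FCS and room for two
 * VLAN tags.
 */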
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;

		/* Wait for tqp hw reset */
		usleep_range(1000, 1200);
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;

		/* Wait for tqp hw reset */
		usleep_range(1000, 1200);
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
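/* Resolution example for the autoneg path above (illustrative, not from
 * the original source): if both link partners advertise symmetric pause,
 * local_advertising and remote_advertising both carry the PAUSE_CAP bit
 * and mii_resolve_flowctrl_fdx() yields FLOW_CTRL_TX | FLOW_CTRL_RX,
 * enabling pause frames in both directions; half duplex forces both off.
 */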
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%d) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

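/* Main PF initialization path. Each step depends on the previous ones, so
 * failures unwind through the error labels at the bottom in reverse order
 * of setup (mdiobus -> misc IRQ -> MSI -> cmd queue -> PCI).
 */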
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Request a delayed reset for the error recovery because an immediate
	 * global reset on a PF would affect the pending initialization of
	 * other PFs.
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

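/* Reset path: re-run the subset of hclge_init_ae_dev() that touches HW
 * state. Software resources (vports, TQPs, IRQ vectors) are kept, so a
 * failure here simply propagates back to the reset handler.
 */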
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

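/* Read the 32-bit register dump. The first BD of the reply reserves
 * HCLGE_32_BIT_DESC_NODATA_LEN words, so it carries fewer register values;
 * the following BDs are treated as fully packed arrays.
 */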
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30

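/* The register dump is laid out in lines of REG_NUM_PER_LINE u32 values;
 * each block is padded to a line boundary with SEPARATOR_VALUE words,
 * which mark the block boundaries in the dump.
 */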
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	/* prepare 4 commands to query DFX BD number */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);
}

static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
#define HCLGE_DFX_REG_BD_NUM	4

	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}

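/* Compute the DFX dump length: each register type occupies bd_num BDs of
 * data, rounded up to a whole REG_LEN_PER_LINE line to leave room for the
 * separator padding added by hclge_dfx_reg_fetch_data().
 */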
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, data_len, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}

static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetch the per-PF register values from the PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}

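/* Total ethtool regs length = PCIe register lines (cmdq, common, per-ring,
 * per-vector) + 32/64-bit firmware register lines, each padded with a
 * separator line, + the DFX dump length computed above.
 */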
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int regs_lines_32_bit, regs_lines_64_bit;
	int dfx_regs_len;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);