// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
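/* read a 64-bit counter from a stats structure at the given byte offset, and
 * map a struct hclge_mac_stats field name to its byte offset
 */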
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

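/* retry limit for a failed reset, plus the delays (in milliseconds) and the
 * poll count used while waiting for a reset to synchronize with firmware
 */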
#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

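/* vport 0 is the PF itself; VF vports are numbered starting from 1 */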
#define HCLGE_VF_VPORT_START_NUM	1

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

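/* ethtool statistics table: the string shown to userspace paired with the
 * offset of the corresponding counter in struct hclge_mac_stats
 */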
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

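/* MAC manager table: a single static entry matching LLDP frames (destination
 * 01:80:c2:00:00:0e, ethertype ETH_P_LLDP) on port 0
 */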
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

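/* default key for the RSS hash (Toeplitz by default); userspace may replace
 * it via ethtool -X
 */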
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

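/* flow director key layout tables: each entry pairs a field id with that
 * field's width in bits
 */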
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

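	/* per the arithmetic below, the first descriptor carries three
	 * register values after the command header and each following
	 * descriptor carries four, so round the remainder up
	 */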
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has a TX and an RX queue, so the count is doubled */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

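	/* HCLGE_STATE_STATISTICS_UPDATING serializes updaters: if another
	 * update is already in flight, this call simply returns
	 */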
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all mac modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check whether the pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* the number of nic msix vectors always equals the roce's */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

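	/* the FEC modes the MAC can offer depend on the link speed: 10G/40G
	 * support BASE-R, 25G/50G support both BASE-R and RS, and 100G
	 * supports RS only
	 */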
	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

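	/* the low 32 bits of the MAC come from param[2]; fold the high 16
	 * bits into bits 47:32 (the shift is split as << 31 << 1, seemingly
	 * to avoid a literal shift-by-32 that static checkers may flag)
	 */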
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length is sent to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* use the minimum number of queue pairs: one per vport */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* by default, ensure a one-to-one mapping between irq and queue */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

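/* Check whether the RX buffer left after private allocations can hold the
 * shared buffer the hardware needs; if so, size the shared buffer and program
 * its high/low watermarks and per-TC thresholds.
 */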
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

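/* Allocate the whole RX buffer as equal per-TC private buffers with no shared
 * buffer; this only works when each TC can get at least the dv reserve plus
 * the compensation headroom defined below.
 */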
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

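	/* try the allocation strategies in order until one fits: equal
	 * private buffers with no shared buffer, maximal watermarks, reduced
	 * watermarks, then dropping the private buffers of non-PFC TCs and
	 * finally those of PFC TCs
	 */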
	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at a time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

2174 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2175 struct hclge_pkt_buf_alloc *buf_alloc)
2177 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2178 struct hclge_rx_com_thrd *req;
2179 struct hclge_desc desc[2];
2180 struct hclge_tc_thrd *tc;
2184 for (i = 0; i < 2; i++) {
2185 hclge_cmd_setup_basic_desc(&desc[i],
2186 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2187 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2189 /* The first descriptor sets the NEXT bit to 1 */
2191 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2193 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2195 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2196 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2198 req->com_thrd[j].high =
2199 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].high |=
2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202 req->com_thrd[j].low =
2203 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2204 req->com_thrd[j].low |=
2205 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2209 /* Send 2 descriptors at one time */
2210 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2212 dev_err(&hdev->pdev->dev,
2213 "common threshold config cmd failed %d\n", ret);
2217 static int hclge_common_wl_config(struct hclge_dev *hdev,
2218 struct hclge_pkt_buf_alloc *buf_alloc)
2220 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2221 struct hclge_rx_com_wl *req;
2222 struct hclge_desc desc;
2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2227 req = (struct hclge_rx_com_wl *)desc.data;
2228 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2229 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2231 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2232 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2234 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2236 dev_err(&hdev->pdev->dev,
2237 "common waterline config cmd failed %d\n", ret);
2242 int hclge_buffer_alloc(struct hclge_dev *hdev)
2244 struct hclge_pkt_buf_alloc *pkt_buf;
2247 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2251 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2253 dev_err(&hdev->pdev->dev,
2254 "could not calc tx buffer size for all TCs %d\n", ret);
2258 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2260 dev_err(&hdev->pdev->dev,
2261 "could not alloc tx buffers %d\n", ret);
2265 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2267 dev_err(&hdev->pdev->dev,
2268 "could not calc rx priv buffer size for all TCs %d\n",
2273 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2275 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2280 if (hnae3_dev_dcb_supported(hdev)) {
2281 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2283 dev_err(&hdev->pdev->dev,
2284 "could not configure rx private waterline %d\n",
2289 ret = hclge_common_thrd_config(hdev, pkt_buf);
2291 dev_err(&hdev->pdev->dev,
2292 "could not configure common threshold %d\n",
2298 ret = hclge_common_wl_config(hdev, pkt_buf);
2300 dev_err(&hdev->pdev->dev,
2301 "could not configure common waterline %d\n", ret);
2308 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2310 struct hnae3_handle *roce = &vport->roce;
2311 struct hnae3_handle *nic = &vport->nic;
2313 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2315 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2316 vport->back->num_msi_left == 0)
2319 roce->rinfo.base_vector = vport->back->roce_base_vector;
2321 roce->rinfo.netdev = nic->kinfo.netdev;
2322 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2324 roce->pdev = nic->pdev;
2325 roce->ae_algo = nic->ae_algo;
2326 roce->numa_node_mask = nic->numa_node_mask;
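/* hclge_init_msi: allocate between HNAE3_MIN_VECTOR_NUM and hdev->num_msi
 * MSI/MSI-X vectors and set up the vector_status/vector_irq bookkeeping
 * arrays; every vector starts out marked as HCLGE_INVALID_VPORT (unused).
 */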
2331 static int hclge_init_msi(struct hclge_dev *hdev)
2333 struct pci_dev *pdev = hdev->pdev;
2337 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2339 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2342 "failed(%d) to allocate MSI/MSI-X vectors\n",
2346 if (vectors < hdev->num_msi)
2347 dev_warn(&hdev->pdev->dev,
2348 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2349 hdev->num_msi, vectors);
2351 hdev->num_msi = vectors;
2352 hdev->num_msi_left = vectors;
2354 hdev->base_msi_vector = pdev->irq;
2355 hdev->roce_base_vector = hdev->base_msi_vector +
2356 hdev->roce_base_msix_offset;
2358 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2359 sizeof(u16), GFP_KERNEL);
2360 if (!hdev->vector_status) {
2361 pci_free_irq_vectors(pdev);
2365 for (i = 0; i < hdev->num_msi; i++)
2366 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2368 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2369 sizeof(int), GFP_KERNEL);
2370 if (!hdev->vector_irq) {
2371 pci_free_irq_vectors(pdev);
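/* hclge_check_speed_dup: only 10M and 100M links may run half duplex; any
 * other speed is forced to full duplex.
 */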
2378 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2380 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2381 duplex = HCLGE_MAC_FULL;
2386 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2389 struct hclge_config_mac_speed_dup_cmd *req;
2390 struct hclge_desc desc;
2393 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2398 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2401 case HCLGE_MAC_SPEED_10M:
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 6);
2405 case HCLGE_MAC_SPEED_100M:
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 7);
2409 case HCLGE_MAC_SPEED_1G:
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 0);
2413 case HCLGE_MAC_SPEED_10G:
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 1);
2417 case HCLGE_MAC_SPEED_25G:
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 2);
2421 case HCLGE_MAC_SPEED_40G:
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 3);
2425 case HCLGE_MAC_SPEED_50G:
2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 HCLGE_CFG_SPEED_S, 4);
2429 case HCLGE_MAC_SPEED_100G:
2430 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2431 HCLGE_CFG_SPEED_S, 5);
2434 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2438 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2441 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2443 dev_err(&hdev->pdev->dev,
2444 "mac speed/duplex config cmd failed %d.\n", ret);
2451 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2455 duplex = hclge_check_speed_dup(duplex, speed);
2456 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2459 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2463 hdev->hw.mac.speed = speed;
2464 hdev->hw.mac.duplex = duplex;
2469 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2472 struct hclge_vport *vport = hclge_get_vport(handle);
2473 struct hclge_dev *hdev = vport->back;
2475 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2478 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2480 struct hclge_config_auto_neg_cmd *req;
2481 struct hclge_desc desc;
2485 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2487 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2489 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2490 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2492 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2494 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2500 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2502 struct hclge_vport *vport = hclge_get_vport(handle);
2503 struct hclge_dev *hdev = vport->back;
2505 if (!hdev->hw.mac.support_autoneg) {
2507 dev_err(&hdev->pdev->dev,
2508 "autoneg is not supported by current port\n");
2515 return hclge_set_autoneg_en(hdev, enable);
2518 static int hclge_get_autoneg(struct hnae3_handle *handle)
2520 struct hclge_vport *vport = hclge_get_vport(handle);
2521 struct hclge_dev *hdev = vport->back;
2522 struct phy_device *phydev = hdev->hw.mac.phydev;
2525 return phydev->autoneg;
2527 return hdev->hw.mac.autoneg;
2530 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2532 struct hclge_vport *vport = hclge_get_vport(handle);
2533 struct hclge_dev *hdev = vport->back;
2536 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2538 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2546 struct hclge_vport *vport = hclge_get_vport(handle);
2547 struct hclge_dev *hdev = vport->back;
2549 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2550 return hclge_set_autoneg_en(hdev, !halt);
2555 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2557 struct hclge_config_fec_cmd *req;
2558 struct hclge_desc desc;
2561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2563 req = (struct hclge_config_fec_cmd *)desc.data;
2564 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2565 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2566 if (fec_mode & BIT(HNAE3_FEC_RS))
2567 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2568 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2569 if (fec_mode & BIT(HNAE3_FEC_BASER))
2570 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2571 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2573 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2575 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2580 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2582 struct hclge_vport *vport = hclge_get_vport(handle);
2583 struct hclge_dev *hdev = vport->back;
2584 struct hclge_mac *mac = &hdev->hw.mac;
2587 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2588 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2592 ret = hclge_set_fec_hw(hdev, fec_mode);
2596 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2600 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603 struct hclge_vport *vport = hclge_get_vport(handle);
2604 struct hclge_dev *hdev = vport->back;
2605 struct hclge_mac *mac = &hdev->hw.mac;
2608 *fec_ability = mac->fec_ability;
2610 *fec_mode = mac->fec_mode;
2613 static int hclge_mac_init(struct hclge_dev *hdev)
2615 struct hclge_mac *mac = &hdev->hw.mac;
2618 hdev->support_sfp_query = true;
2619 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2620 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2621 hdev->hw.mac.duplex);
2623 dev_err(&hdev->pdev->dev,
2624 "Config mac speed dup fail ret=%d\n", ret);
2628 if (hdev->hw.mac.support_autoneg) {
2629 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2631 dev_err(&hdev->pdev->dev,
2632 "Config mac autoneg fail ret=%d\n", ret);
2639 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2640 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2642 dev_err(&hdev->pdev->dev,
2643 "Fec mode init fail, ret = %d\n", ret);
2648 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2650 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2654 ret = hclge_set_default_loopback(hdev);
2658 ret = hclge_buffer_alloc(hdev);
2660 dev_err(&hdev->pdev->dev,
2661 "allocate buffer fail, ret=%d\n", ret);
2666 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2668 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2669 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2670 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2671 &hdev->mbx_service_task);
2674 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2676 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2677 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2678 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2679 &hdev->rst_service_task);
2682 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2684 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2685 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2686 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2687 hdev->hw_stats.stats_timer++;
2688 hdev->fd_arfs_expire_timer++;
2689 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2690 system_wq, &hdev->service_task,
2695 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2697 struct hclge_link_status_cmd *req;
2698 struct hclge_desc desc;
2702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2703 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2705 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2710 req = (struct hclge_link_status_cmd *)desc.data;
2711 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2713 return !!link_status;
2716 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2718 unsigned int mac_state;
2721 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2724 mac_state = hclge_get_mac_link_status(hdev);
2726 if (hdev->hw.mac.phydev) {
2727 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2728 link_stat = mac_state &
2729 hdev->hw.mac.phydev->link;
2734 link_stat = mac_state;
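/* hclge_update_link_status: on a MAC/PHY link state change, notify the NIC
 * client (and the RoCE client, if registered) of every vport and
 * reconfigure the MAC tunnel interrupt to match the new state.
 */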
2740 static void hclge_update_link_status(struct hclge_dev *hdev)
2742 struct hnae3_client *rclient = hdev->roce_client;
2743 struct hnae3_client *client = hdev->nic_client;
2744 struct hnae3_handle *rhandle;
2745 struct hnae3_handle *handle;
2751 state = hclge_get_mac_phy_link(hdev);
2752 if (state != hdev->hw.mac.link) {
2753 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2754 handle = &hdev->vport[i].nic;
2755 client->ops->link_status_change(handle, state);
2756 hclge_config_mac_tnl_int(hdev, state);
2757 rhandle = &hdev->vport[i].roce;
2758 if (rclient && rclient->ops->link_status_change)
2759 rclient->ops->link_status_change(rhandle,
2762 hdev->hw.mac.link = state;
2766 static void hclge_update_port_capability(struct hclge_mac *mac)
2768 /* update fec ability by speed */
2769 hclge_convert_setting_fec(mac);
2771 /* firmware cannot identify backplane type, the media type
2772 * read from configuration can help to deal with it
2774 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2775 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2776 mac->module_type = HNAE3_MODULE_TYPE_KR;
2777 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2778 mac->module_type = HNAE3_MODULE_TYPE_TP;
2780 if (mac->support_autoneg) {
2781 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2782 linkmode_copy(mac->advertising, mac->supported);
2784 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2786 linkmode_zero(mac->advertising);
2790 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2792 struct hclge_sfp_info_cmd *resp;
2793 struct hclge_desc desc;
2796 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2797 resp = (struct hclge_sfp_info_cmd *)desc.data;
2798 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2799 if (ret == -EOPNOTSUPP) {
2800 dev_warn(&hdev->pdev->dev,
2801 "IMP do not support get SFP speed %d\n", ret);
2804 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2808 *speed = le32_to_cpu(resp->speed);
2813 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2815 struct hclge_sfp_info_cmd *resp;
2816 struct hclge_desc desc;
2819 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2820 resp = (struct hclge_sfp_info_cmd *)desc.data;
2822 resp->query_type = QUERY_ACTIVE_SPEED;
2824 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2825 if (ret == -EOPNOTSUPP) {
2826 dev_warn(&hdev->pdev->dev,
2827 "IMP does not support get SFP info %d\n", ret);
2830 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2834 mac->speed = le32_to_cpu(resp->speed);
2835 /* if resp->speed_ability is 0, it means the firmware is an old
2836 * version, so do not update these params
2838 if (resp->speed_ability) {
2839 mac->module_type = le32_to_cpu(resp->module_type);
2840 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2841 mac->autoneg = resp->autoneg;
2842 mac->support_autoneg = resp->autoneg_ability;
2843 mac->speed_type = QUERY_ACTIVE_SPEED;
2844 if (!resp->active_fec)
2847 mac->fec_mode = BIT(resp->active_fec);
2849 mac->speed_type = QUERY_SFP_SPEED;
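/* hclge_update_port_info: refresh port info from firmware for non-copper
 * ports. Revision >= 0x21 uses the full SFP info query; older revisions
 * only query the SFP speed. If the IMP does not support the query at all,
 * support_sfp_query is latched to false so it is not retried.
 */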
2855 static int hclge_update_port_info(struct hclge_dev *hdev)
2857 struct hclge_mac *mac = &hdev->hw.mac;
2858 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2861 /* get the port info from SFP cmd if not copper port */
2862 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2865 /* if IMP does not support getting SFP/qSFP info, return directly */
2866 if (!hdev->support_sfp_query)
2869 if (hdev->pdev->revision >= 0x21)
2870 ret = hclge_get_sfp_info(hdev, mac);
2872 ret = hclge_get_sfp_speed(hdev, &speed);
2874 if (ret == -EOPNOTSUPP) {
2875 hdev->support_sfp_query = false;
2881 if (hdev->pdev->revision >= 0x21) {
2882 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2883 hclge_update_port_capability(mac);
2886 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2889 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2890 return 0; /* do nothing if no SFP */
2892 /* must config full duplex for SFP */
2893 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2897 static int hclge_get_status(struct hnae3_handle *handle)
2899 struct hclge_vport *vport = hclge_get_vport(handle);
2900 struct hclge_dev *hdev = vport->back;
2902 hclge_update_link_status(hdev);
2904 return hdev->hw.mac.link;
2907 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2909 if (pci_num_vf(hdev->pdev) == 0) {
2910 dev_err(&hdev->pdev->dev,
2911 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2915 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2916 dev_err(&hdev->pdev->dev,
2917 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2918 vf, pci_num_vf(hdev->pdev));
2922 /* VFs start from 1 in vport */
2923 vf += HCLGE_VF_VPORT_START_NUM;
2924 return &hdev->vport[vf];
2927 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2928 struct ifla_vf_info *ivf)
2930 struct hclge_vport *vport = hclge_get_vport(handle);
2931 struct hclge_dev *hdev = vport->back;
2933 vport = hclge_get_vf_vport(hdev, vf);
2938 ivf->linkstate = vport->vf_info.link_state;
2939 ivf->spoofchk = vport->vf_info.spoofchk;
2940 ivf->trusted = vport->vf_info.trusted;
2941 ivf->min_tx_rate = 0;
2942 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2943 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2948 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2951 struct hclge_vport *vport = hclge_get_vport(handle);
2952 struct hclge_dev *hdev = vport->back;
2954 vport = hclge_get_vf_vport(hdev, vf);
2958 vport->vf_info.link_state = link_state;
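/* hclge_check_event_cause: decode the vector0 interrupt source. Events are
 * checked in priority order: IMP reset, global reset, MSI-X errors, then
 * mailbox (CMDQ RX); the bits the caller must clear are returned through
 * @clearval.
 */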
2963 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2965 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2967 /* fetch the events from their corresponding regs */
2968 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2969 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2970 msix_src_reg = hclge_read_dev(&hdev->hw,
2971 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2973 /* Assumption: If by any chance reset and mailbox events are reported
2974 * together then we will only process the reset event in this go and will
2975 * defer the processing of the mailbox events. Since we would not have
2976 * cleared the RX CMDQ event this time, we would receive another
2977 * interrupt from H/W just for the mailbox.
2979 * check for vector0 reset event sources
2981 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2986 hdev->rst_stats.imp_rst_cnt++;
2987 return HCLGE_VECTOR0_EVENT_RST;
2990 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2991 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2992 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2993 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2995 hdev->rst_stats.global_rst_cnt++;
2996 return HCLGE_VECTOR0_EVENT_RST;
2999 /* check for vector0 msix event source */
3000 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3001 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
3003 *clearval = msix_src_reg;
3004 return HCLGE_VECTOR0_EVENT_ERR;
3007 /* check for vector0 mailbox(=CMDQ RX) event source */
3008 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3009 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3010 *clearval = cmdq_src_reg;
3011 return HCLGE_VECTOR0_EVENT_MBX;
3014 /* print other vector0 event source */
3015 dev_info(&hdev->pdev->dev,
3016 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3017 cmdq_src_reg, msix_src_reg);
3018 *clearval = msix_src_reg;
3020 return HCLGE_VECTOR0_EVENT_OTHER;
3023 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3026 switch (event_type) {
3027 case HCLGE_VECTOR0_EVENT_RST:
3028 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3030 case HCLGE_VECTOR0_EVENT_MBX:
3031 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3038 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3040 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3041 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3042 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3043 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3044 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3047 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3049 writel(enable ? 1 : 0, vector->addr);
3052 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3054 struct hclge_dev *hdev = data;
3058 hclge_enable_vector(&hdev->misc_vector, false);
3059 event_cause = hclge_check_event_cause(hdev, &clearval);
3061 /* vector 0 interrupt is shared with reset and mailbox source events. */
3062 switch (event_cause) {
3063 case HCLGE_VECTOR0_EVENT_ERR:
3064 /* we do not know what type of reset is required now. This could
3065 * only be decided after we fetch the type of errors which
3066 * caused this event. Therefore, we will do the below for now:
3067 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3068 * have deferred the type of reset to be used.
3069 * 2. Schedule the reset service task.
3070 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3071 * will fetch the correct type of reset. This would be done
3072 * by first decoding the types of errors.
3074 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3076 case HCLGE_VECTOR0_EVENT_RST:
3077 hclge_reset_task_schedule(hdev);
3079 case HCLGE_VECTOR0_EVENT_MBX:
3080 /* If we are here then,
3081 * 1. Either we are not handling any mbx task nor are we scheduled,
3084 * 2. We could be handling a mbx task but nothing more is scheduled.
3086 * In both cases, we should schedule mbx task as there are more
3087 * mbx messages reported by this interrupt.
3089 hclge_mbx_task_schedule(hdev);
3092 dev_warn(&hdev->pdev->dev,
3093 "received unknown or unhandled event of vector0\n");
3097 hclge_clear_event_cause(hdev, event_cause, clearval);
3099 /* Enable interrupt if it is not caused by reset. And when
3100 * clearval is equal to 0, it means interrupt status may be
3101 * cleared by hardware before the driver reads the status register.
3102 * For this case, the vector0 interrupt should also be enabled.
3105 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3106 hclge_enable_vector(&hdev->misc_vector, true);
3112 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3114 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3115 dev_warn(&hdev->pdev->dev,
3116 "vector(vector_id %d) has been freed.\n", vector_id);
3120 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3121 hdev->num_msi_left += 1;
3122 hdev->num_msi_used -= 1;
3125 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3127 struct hclge_misc_vector *vector = &hdev->misc_vector;
3129 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3131 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3132 hdev->vector_status[0] = 0;
3134 hdev->num_msi_left -= 1;
3135 hdev->num_msi_used += 1;
3138 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3139 const cpumask_t *mask)
3141 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3144 cpumask_copy(&hdev->affinity_mask, mask);
3147 static void hclge_irq_affinity_release(struct kref *ref)
3151 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3153 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3154 &hdev->affinity_mask);
3156 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3157 hdev->affinity_notify.release = hclge_irq_affinity_release;
3158 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3159 &hdev->affinity_notify);
3162 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3164 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3165 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3168 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3172 hclge_get_misc_vector(hdev);
3174 /* this would be explicitly freed in the end */
3175 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3176 0, "hclge_misc", hdev);
3178 hclge_free_vector(hdev, 0);
3179 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180 hdev->misc_vector.vector_irq);
3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3188 free_irq(hdev->misc_vector.vector_irq, hdev);
3189 hclge_free_vector(hdev, 0);
3192 int hclge_notify_client(struct hclge_dev *hdev,
3193 enum hnae3_reset_notify_type type)
3195 struct hnae3_client *client = hdev->nic_client;
3198 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3201 if (!client->ops->reset_notify)
3204 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205 struct hnae3_handle *handle = &hdev->vport[i].nic;
3208 ret = client->ops->reset_notify(handle, type);
3210 dev_err(&hdev->pdev->dev,
3211 "notify nic client failed %d(%d)\n", type, ret);
3219 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220 enum hnae3_reset_notify_type type)
3222 struct hnae3_client *client = hdev->roce_client;
3226 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3229 if (!client->ops->reset_notify)
3232 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233 struct hnae3_handle *handle = &hdev->vport[i].roce;
3235 ret = client->ops->reset_notify(handle, type);
3237 dev_err(&hdev->pdev->dev,
3238 "notify roce client failed %d(%d)",
3247 static int hclge_reset_wait(struct hclge_dev *hdev)
3249 #define HCLGE_RESET_WAIT_MS 100
3250 #define HCLGE_RESET_WAIT_CNT 200
3251 u32 val, reg, reg_bit;
3254 switch (hdev->reset_type) {
3255 case HNAE3_IMP_RESET:
3256 reg = HCLGE_GLOBAL_RESET_REG;
3257 reg_bit = HCLGE_IMP_RESET_BIT;
3259 case HNAE3_GLOBAL_RESET:
3260 reg = HCLGE_GLOBAL_RESET_REG;
3261 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3263 case HNAE3_FUNC_RESET:
3264 reg = HCLGE_FUN_RST_ING;
3265 reg_bit = HCLGE_FUN_RST_ING_B;
3267 case HNAE3_FLR_RESET:
3270 dev_err(&hdev->pdev->dev,
3271 "Wait for unsupported reset type: %d\n",
3276 if (hdev->reset_type == HNAE3_FLR_RESET) {
3277 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3278 cnt++ < HCLGE_RESET_WAIT_CNT)
3279 msleep(HCLGE_RESET_WAIT_MS);
3281 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3282 dev_err(&hdev->pdev->dev,
3283 "flr wait timeout: %u\n", cnt);
3290 val = hclge_read_dev(&hdev->hw, reg);
3291 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3292 msleep(HCLGE_RESET_WAIT_MS);
3293 val = hclge_read_dev(&hdev->hw, reg);
3297 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3298 dev_warn(&hdev->pdev->dev,
3299 "Wait for reset timeout: %d\n", hdev->reset_type);
3306 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3308 struct hclge_vf_rst_cmd *req;
3309 struct hclge_desc desc;
3311 req = (struct hclge_vf_rst_cmd *)desc.data;
3312 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3313 req->dest_vfid = func_id;
3318 return hclge_cmd_send(&hdev->hw, &desc, 1);
3321 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3325 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3326 struct hclge_vport *vport = &hdev->vport[i];
3329 /* Send cmd to set/clear VF's FUNC_RST_ING */
3330 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3332 dev_err(&hdev->pdev->dev,
3333 "set vf(%u) rst failed %d!\n",
3334 vport->vport_id, ret);
3338 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3341 /* Inform VF to process the reset.
3342 * hclge_inform_reset_assert_to_vf may fail if VF
3343 * driver is not loaded.
3345 ret = hclge_inform_reset_assert_to_vf(vport);
3347 dev_warn(&hdev->pdev->dev,
3348 "inform reset to vf(%u) failed %d!\n",
3349 vport->vport_id, ret);
3355 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3357 struct hclge_pf_rst_sync_cmd *req;
3358 struct hclge_desc desc;
3362 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3363 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3366 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367 /* for compatibility with old firmware, wait
3368 * 100 ms for the VF to stop IO
3370 if (ret == -EOPNOTSUPP) {
3371 msleep(HCLGE_RESET_SYNC_TIME);
3374 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3377 } else if (req->all_vf_ready) {
3380 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381 hclge_cmd_reuse_desc(&desc, true);
3382 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3384 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3388 void hclge_report_hw_error(struct hclge_dev *hdev,
3389 enum hnae3_hw_error_type type)
3391 struct hnae3_client *client = hdev->nic_client;
3394 if (!client || !client->ops->process_hw_error ||
3395 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3398 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3399 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3402 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3406 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3407 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3408 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3409 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3410 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3413 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3414 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3415 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3416 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3420 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3422 struct hclge_desc desc;
3423 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3426 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3427 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3428 req->fun_reset_vfid = func_id;
3430 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3432 dev_err(&hdev->pdev->dev,
3433 "send function reset cmd fail, status =%d\n", ret);
3438 static void hclge_do_reset(struct hclge_dev *hdev)
3440 struct hnae3_handle *handle = &hdev->vport[0].nic;
3441 struct pci_dev *pdev = hdev->pdev;
3444 if (hclge_get_hw_reset_stat(handle)) {
3445 dev_info(&pdev->dev, "Hardware reset not finish\n");
3446 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3447 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3448 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3452 switch (hdev->reset_type) {
3453 case HNAE3_GLOBAL_RESET:
3454 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3455 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3456 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3457 dev_info(&pdev->dev, "Global Reset requested\n");
3459 case HNAE3_FUNC_RESET:
3460 dev_info(&pdev->dev, "PF Reset requested\n");
3461 /* schedule again to check later */
3462 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3463 hclge_reset_task_schedule(hdev);
3465 case HNAE3_FLR_RESET:
3466 dev_info(&pdev->dev, "FLR requested\n");
3467 /* schedule again to check later */
3468 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3469 hclge_reset_task_schedule(hdev);
3472 dev_warn(&pdev->dev,
3473 "Unsupported reset type: %d\n", hdev->reset_type);
3478 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3479 unsigned long *addr)
3481 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3482 struct hclge_dev *hdev = ae_dev->priv;
3484 /* first, resolve any unknown reset type to the known type(s) */
3485 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3486 /* we will intentionally ignore any errors from this function
3487 * as we will end up in *some* reset request in any case
3489 hclge_handle_hw_msix_error(hdev, addr);
3490 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3491 /* We deferred the clearing of the error event which caused the
3492 * interrupt since it was not possible to do that in
3493 * interrupt context (and this is the reason we introduced the
3494 * new UNKNOWN reset type). Now that the errors have been
3495 * handled and cleared in hardware, we can safely enable
3496 * interrupts. This is an exception to the norm.
3498 hclge_enable_vector(&hdev->misc_vector, true);
3501 /* return the highest priority reset level amongst all */
3502 if (test_bit(HNAE3_IMP_RESET, addr)) {
3503 rst_level = HNAE3_IMP_RESET;
3504 clear_bit(HNAE3_IMP_RESET, addr);
3505 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506 clear_bit(HNAE3_FUNC_RESET, addr);
3507 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3508 rst_level = HNAE3_GLOBAL_RESET;
3509 clear_bit(HNAE3_GLOBAL_RESET, addr);
3510 clear_bit(HNAE3_FUNC_RESET, addr);
3511 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3512 rst_level = HNAE3_FUNC_RESET;
3513 clear_bit(HNAE3_FUNC_RESET, addr);
3514 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3515 rst_level = HNAE3_FLR_RESET;
3516 clear_bit(HNAE3_FLR_RESET, addr);
3519 if (hdev->reset_type != HNAE3_NONE_RESET &&
3520 rst_level < hdev->reset_type)
3521 return HNAE3_NONE_RESET;
3526 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3530 switch (hdev->reset_type) {
3531 case HNAE3_IMP_RESET:
3532 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3534 case HNAE3_GLOBAL_RESET:
3535 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3544 /* For revision 0x20, the reset interrupt source
3545 * can only be cleared after the hardware reset is done
3547 if (hdev->pdev->revision == 0x20)
3548 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3551 hclge_enable_vector(&hdev->misc_vector, true);
3554 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3558 switch (hdev->reset_type) {
3559 case HNAE3_FUNC_RESET:
3561 case HNAE3_FLR_RESET:
3562 ret = hclge_set_all_vf_rst(hdev, true);
3571 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3575 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3577 reg_val |= HCLGE_NIC_SW_RST_RDY;
3579 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3581 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3584 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3589 switch (hdev->reset_type) {
3590 case HNAE3_FUNC_RESET:
3591 /* to confirm whether all running VFs are ready
3592 * before requesting PF reset
3594 ret = hclge_func_reset_sync_vf(hdev);
3598 ret = hclge_func_reset_cmd(hdev, 0);
3600 dev_err(&hdev->pdev->dev,
3601 "asserting function reset fail %d!\n", ret);
3605 /* After performing PF reset, it is not necessary to do the
3606 * mailbox handling or send any command to firmware, because
3607 * any mailbox handling or command to firmware is only valid
3608 * after hclge_cmd_init is called.
3610 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3611 hdev->rst_stats.pf_rst_cnt++;
3613 case HNAE3_FLR_RESET:
3614 /* to confirm whether all running VFs are ready
3615 * before requesting PF reset
3617 ret = hclge_func_reset_sync_vf(hdev);
3621 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3622 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3623 hdev->rst_stats.flr_rst_cnt++;
3625 case HNAE3_IMP_RESET:
3626 hclge_handle_imp_error(hdev);
3627 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3628 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3629 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3635 /* inform hardware that preparatory work is done */
3636 msleep(HCLGE_RESET_SYNC_TIME);
3637 hclge_reset_handshake(hdev, true);
3638 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3643 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3645 #define MAX_RESET_FAIL_CNT 5
3647 if (hdev->reset_pending) {
3648 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3649 hdev->reset_pending);
3651 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3652 HCLGE_RESET_INT_M) {
3653 dev_info(&hdev->pdev->dev,
3654 "reset failed because new reset interrupt\n");
3655 hclge_clear_reset_cause(hdev);
3657 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3658 hdev->rst_stats.reset_fail_cnt++;
3659 set_bit(hdev->reset_type, &hdev->reset_pending);
3660 dev_info(&hdev->pdev->dev,
3661 "re-schedule reset task(%u)\n",
3662 hdev->rst_stats.reset_fail_cnt);
3666 hclge_clear_reset_cause(hdev);
3668 /* recover the handshake status when reset fails */
3669 hclge_reset_handshake(hdev, true);
3671 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3673 hclge_dbg_dump_rst_info(hdev);
3678 static int hclge_set_rst_done(struct hclge_dev *hdev)
3680 struct hclge_pf_rst_done_cmd *req;
3681 struct hclge_desc desc;
3683 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3684 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3685 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3687 return hclge_cmd_send(&hdev->hw, &desc, 1);
3690 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3694 switch (hdev->reset_type) {
3695 case HNAE3_FUNC_RESET:
3697 case HNAE3_FLR_RESET:
3698 ret = hclge_set_all_vf_rst(hdev, false);
3700 case HNAE3_GLOBAL_RESET:
3702 case HNAE3_IMP_RESET:
3703 ret = hclge_set_rst_done(hdev);
3709 /* clear up the handshake status after re-initialization is done */
3710 hclge_reset_handshake(hdev, false);
3715 static int hclge_reset_stack(struct hclge_dev *hdev)
3719 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3723 ret = hclge_reset_ae_dev(hdev->ae_dev);
3727 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3731 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3734 static void hclge_reset(struct hclge_dev *hdev)
3736 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3737 enum hnae3_reset_type reset_level;
3740 /* Initialize ae_dev reset status as well, in case the enet layer wants
3741 * to know if the device is undergoing reset
3743 ae_dev->reset_type = hdev->reset_type;
3744 hdev->rst_stats.reset_cnt++;
3745 /* perform reset of the stack & ae device for a client */
3746 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3750 ret = hclge_reset_prepare_down(hdev);
3755 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3757 goto err_reset_lock;
3761 ret = hclge_reset_prepare_wait(hdev);
3765 if (hclge_reset_wait(hdev))
3768 hdev->rst_stats.hw_reset_done_cnt++;
3770 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3776 ret = hclge_reset_stack(hdev);
3778 goto err_reset_lock;
3780 hclge_clear_reset_cause(hdev);
3782 ret = hclge_reset_prepare_up(hdev);
3784 goto err_reset_lock;
3788 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3789 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 times */
3793 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3798 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3800 goto err_reset_lock;
3804 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3808 hdev->last_reset_time = jiffies;
3809 hdev->rst_stats.reset_fail_cnt = 0;
3810 hdev->rst_stats.reset_done_cnt++;
3811 ae_dev->reset_type = HNAE3_NONE_RESET;
3813 /* if default_reset_request has a higher level reset request,
3814 * it should be handled as soon as possible, since some errors
3815 * need this kind of reset to be fixed.
3817 reset_level = hclge_get_reset_level(ae_dev,
3818 &hdev->default_reset_request);
3819 if (reset_level != HNAE3_NONE_RESET)
3820 set_bit(reset_level, &hdev->reset_request);
3827 if (hclge_reset_err_handle(hdev))
3828 hclge_reset_task_schedule(hdev);
3831 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3833 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3834 struct hclge_dev *hdev = ae_dev->priv;
3836 /* We might end up getting called broadly because of the 2 cases below:
3837 * 1. Recoverable error was conveyed through APEI and the only way to bring
3838 * normalcy is to reset.
3839 * 2. A new reset request from the stack due to timeout
3841 * For the first case, the error event might not have an ae handle available.
3842 * Check if this is a new reset request and we are not here just because the
3843 * last reset attempt did not succeed and the watchdog hit us again. We will
3844 * know this if the last reset request did not occur very recently (watchdog
3845 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3846 * In case of a new request we reset the "reset level" to PF reset.
3847 * And if it is a repeat reset request of the most recent one then we
3848 * want to make sure we throttle the reset request. Therefore, we will
3849 * not allow it again before 3*HZ has elapsed.
3852 handle = &hdev->vport[0].nic;
3854 if (time_before(jiffies, (hdev->last_reset_time +
3855 HCLGE_RESET_INTERVAL))) {
3856 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3858 } else if (hdev->default_reset_request) {
3860 hclge_get_reset_level(ae_dev,
3861 &hdev->default_reset_request);
3862 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3863 hdev->reset_level = HNAE3_FUNC_RESET;
3866 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3869 /* request reset & schedule reset task */
3870 set_bit(hdev->reset_level, &hdev->reset_request);
3871 hclge_reset_task_schedule(hdev);
3873 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3874 hdev->reset_level++;
3877 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3878 enum hnae3_reset_type rst_type)
3880 struct hclge_dev *hdev = ae_dev->priv;
3882 set_bit(rst_type, &hdev->default_reset_request);
3885 static void hclge_reset_timer(struct timer_list *t)
3887 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3889 /* if default_reset_request has no value, it means that this reset
3890 * request has already been handled, so just return here
3892 if (!hdev->default_reset_request)
3895 dev_info(&hdev->pdev->dev,
3896 "triggering reset in reset timer\n");
3897 hclge_reset_event(hdev->pdev, NULL);
3900 static void hclge_reset_subtask(struct hclge_dev *hdev)
3902 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3904 /* check if there is any ongoing reset in the hardware. This status can
3905 * be checked from reset_pending. If there is, then we need to wait for
3906 * the hardware to complete the reset.
3907 * a. If we are able to figure out in reasonable time that the hardware
3908 * has fully reset, then we can proceed with driver and client init,
3910 * b. else, we can come back later to check this status and re-schedule.
3913 hdev->last_reset_time = jiffies;
3914 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3915 if (hdev->reset_type != HNAE3_NONE_RESET)
3918 /* check if we got any *new* reset requests to be honored */
3919 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3920 if (hdev->reset_type != HNAE3_NONE_RESET)
3921 hclge_do_reset(hdev);
3923 hdev->reset_type = HNAE3_NONE_RESET;
3926 static void hclge_reset_service_task(struct work_struct *work)
3928 struct hclge_dev *hdev =
3929 container_of(work, struct hclge_dev, rst_service_task);
3931 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3934 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3936 hclge_reset_subtask(hdev);
3938 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3941 static void hclge_mailbox_service_task(struct work_struct *work)
3943 struct hclge_dev *hdev =
3944 container_of(work, struct hclge_dev, mbx_service_task);
3946 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3949 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3951 hclge_mbx_handler(hdev);
3953 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3956 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3960 /* start from vport 1 since the PF is always alive */
3961 for (i = 1; i < hdev->num_alloc_vport; i++) {
3962 struct hclge_vport *vport = &hdev->vport[i];
3964 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3965 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3967 /* If vf is not alive, set to default value */
3968 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3969 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3973 static void hclge_service_task(struct work_struct *work)
3975 struct hclge_dev *hdev =
3976 container_of(work, struct hclge_dev, service_task.work);
3978 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3980 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3981 hclge_update_stats_for_all(hdev);
3982 hdev->hw_stats.stats_timer = 0;
3985 hclge_update_port_info(hdev);
3986 hclge_update_link_status(hdev);
3987 hclge_update_vport_alive(hdev);
3988 hclge_sync_vlan_filter(hdev);
3990 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3991 hclge_rfs_filter_expire(hdev);
3992 hdev->fd_arfs_expire_timer = 0;
3995 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3998 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4000 /* VF handle has no client */
4001 if (!handle->client)
4002 return container_of(handle, struct hclge_vport, nic);
4003 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4004 return container_of(handle, struct hclge_vport, roce);
4006 return container_of(handle, struct hclge_vport, nic);
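/* hclge_get_vector: hand out up to @vector_num unused MSI/MSI-X vectors to
 * the client. Vector 0 is reserved for the misc interrupt, so the search
 * starts at index 1; for a granted vector i the register address is
 * computed roughly as below (a sketch: the multiplier of
 * HCLGE_VECTOR_VF_OFFSET is not visible here and is presumably the vport
 * id):
 *
 *   io_addr = io_base + HCLGE_VECTOR_REG_BASE +
 *             (i - 1) * HCLGE_VECTOR_REG_OFFSET +
 *             vport_id * HCLGE_VECTOR_VF_OFFSET
 */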
4009 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4010 struct hnae3_vector_info *vector_info)
4012 struct hclge_vport *vport = hclge_get_vport(handle);
4013 struct hnae3_vector_info *vector = vector_info;
4014 struct hclge_dev *hdev = vport->back;
4018 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4019 vector_num = min(hdev->num_msi_left, vector_num);
4021 for (j = 0; j < vector_num; j++) {
4022 for (i = 1; i < hdev->num_msi; i++) {
4023 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4024 vector->vector = pci_irq_vector(hdev->pdev, i);
4025 vector->io_addr = hdev->hw.io_base +
4026 HCLGE_VECTOR_REG_BASE +
4027 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4029 HCLGE_VECTOR_VF_OFFSET;
4030 hdev->vector_status[i] = vport->vport_id;
4031 hdev->vector_irq[i] = vector->vector;
4040 hdev->num_msi_left -= alloc;
4041 hdev->num_msi_used += alloc;
4046 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4050 for (i = 0; i < hdev->num_msi; i++)
4051 if (vector == hdev->vector_irq[i])
4057 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4059 struct hclge_vport *vport = hclge_get_vport(handle);
4060 struct hclge_dev *hdev = vport->back;
4063 vector_id = hclge_get_vector_index(hdev, vector);
4064 if (vector_id < 0) {
4065 dev_err(&hdev->pdev->dev,
4066 "Get vector index fail. vector_id =%d\n", vector_id);
4070 hclge_free_vector(hdev, vector_id);
4075 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4077 return HCLGE_RSS_KEY_SIZE;
4080 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4082 return HCLGE_RSS_IND_TBL_SIZE;
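/* hclge_set_rss_algo_key: write the RSS hash algorithm and hash key to
 * hardware. The key is split into HCLGE_RSS_HASH_KEY_NUM byte chunks, one
 * descriptor per chunk, with the chunk offset carried alongside the
 * algorithm in hash_config.
 */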
4085 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4086 const u8 hfunc, const u8 *key)
4088 struct hclge_rss_config_cmd *req;
4089 unsigned int key_offset = 0;
4090 struct hclge_desc desc;
4095 key_counts = HCLGE_RSS_KEY_SIZE;
4096 req = (struct hclge_rss_config_cmd *)desc.data;
4098 while (key_counts) {
4099 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4102 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4103 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4105 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4106 memcpy(req->hash_key,
4107 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4109 key_counts -= key_size;
4111 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4113 dev_err(&hdev->pdev->dev,
4114 "Configure RSS config fail, status = %d\n",
4122 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4124 struct hclge_rss_indirection_table_cmd *req;
4125 struct hclge_desc desc;
4129 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4131 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4132 hclge_cmd_setup_basic_desc
4133 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4135 req->start_table_index =
4136 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4137 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4139 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4140 req->rss_result[j] =
4141 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4143 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4145 dev_err(&hdev->pdev->dev,
4146 "Configure rss indir table fail,status = %d\n",
4154 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4155 u16 *tc_size, u16 *tc_offset)
4157 struct hclge_rss_tc_mode_cmd *req;
4158 struct hclge_desc desc;
4162 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4163 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4165 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4168 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4169 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4170 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4171 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4172 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4174 req->rss_tc_mode[i] = cpu_to_le16(mode);
4177 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4179 dev_err(&hdev->pdev->dev,
4180 "Configure rss tc mode fail, status = %d\n", ret);
4185 static void hclge_get_rss_type(struct hclge_vport *vport)
4187 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4188 vport->rss_tuple_sets.ipv4_udp_en ||
4189 vport->rss_tuple_sets.ipv4_sctp_en ||
4190 vport->rss_tuple_sets.ipv6_tcp_en ||
4191 vport->rss_tuple_sets.ipv6_udp_en ||
4192 vport->rss_tuple_sets.ipv6_sctp_en)
4193 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4194 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4195 vport->rss_tuple_sets.ipv6_fragment_en)
4196 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4198 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4201 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4203 struct hclge_rss_input_tuple_cmd *req;
4204 struct hclge_desc desc;
4207 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4209 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4211 /* Get the tuple cfg from pf */
4212 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4213 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4214 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4215 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4216 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4217 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4218 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4219 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4220 hclge_get_rss_type(&hdev->vport[0]);
4221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4223 dev_err(&hdev->pdev->dev,
4224 "Configure rss input fail, status = %d\n", ret);
4228 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4231 struct hclge_vport *vport = hclge_get_vport(handle);
4234 /* Get hash algorithm */
4236 switch (vport->rss_algo) {
4237 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4238 *hfunc = ETH_RSS_HASH_TOP;
4240 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4241 *hfunc = ETH_RSS_HASH_XOR;
4244 *hfunc = ETH_RSS_HASH_UNKNOWN;
4249 /* Get the RSS Key required by the user */
4251 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4253 /* Get indirect table */
4255 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4256 indir[i] = vport->rss_indirection_tbl[i];
4261 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4262 const u8 *key, const u8 hfunc)
4264 struct hclge_vport *vport = hclge_get_vport(handle);
4265 struct hclge_dev *hdev = vport->back;
4269 /* Set the RSS Hash Key if specified by the user */
4272 case ETH_RSS_HASH_TOP:
4273 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4275 case ETH_RSS_HASH_XOR:
4276 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4278 case ETH_RSS_HASH_NO_CHANGE:
4279 hash_algo = vport->rss_algo;
4285 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4289 /* Update the shadow RSS key with the user specified key */
4290 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4291 vport->rss_algo = hash_algo;
4294 /* Update the shadow RSS table with user specified qids */
4295 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4296 vport->rss_indirection_tbl[i] = indir[i];
4298 /* Update the hardware */
4299 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
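/* hclge_get_rss_hash_bits: translate the ethtool RXH_* tuple flags into
 * the hardware tuple bits for source/destination IP and L4 ports; SCTP
 * flows additionally hash on HCLGE_V_TAG_BIT (presumably the SCTP
 * verification tag).
 */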
4302 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4304 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4306 if (nfc->data & RXH_L4_B_2_3)
4307 hash_sets |= HCLGE_D_PORT_BIT;
4309 hash_sets &= ~HCLGE_D_PORT_BIT;
4311 if (nfc->data & RXH_IP_SRC)
4312 hash_sets |= HCLGE_S_IP_BIT;
4314 hash_sets &= ~HCLGE_S_IP_BIT;
4316 if (nfc->data & RXH_IP_DST)
4317 hash_sets |= HCLGE_D_IP_BIT;
4319 hash_sets &= ~HCLGE_D_IP_BIT;
4321 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4322 hash_sets |= HCLGE_V_TAG_BIT;
4327 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4328 struct ethtool_rxnfc *nfc)
4330 struct hclge_vport *vport = hclge_get_vport(handle);
4331 struct hclge_dev *hdev = vport->back;
4332 struct hclge_rss_input_tuple_cmd *req;
4333 struct hclge_desc desc;
4337 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4338 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4341 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4344 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4345 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4346 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4347 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4348 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4349 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4350 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4351 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4353 tuple_sets = hclge_get_rss_hash_bits(nfc);
4354 switch (nfc->flow_type) {
4356 req->ipv4_tcp_en = tuple_sets;
4359 req->ipv6_tcp_en = tuple_sets;
4362 req->ipv4_udp_en = tuple_sets;
4365 req->ipv6_udp_en = tuple_sets;
4368 req->ipv4_sctp_en = tuple_sets;
4371 if ((nfc->data & RXH_L4_B_0_1) ||
4372 (nfc->data & RXH_L4_B_2_3))
4375 req->ipv6_sctp_en = tuple_sets;
4378 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4381 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4387 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4389 dev_err(&hdev->pdev->dev,
4390 "Set rss tuple fail, status = %d\n", ret);
4394 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4395 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4396 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4397 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4398 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4399 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4400 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4401 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4402 hclge_get_rss_type(vport);
4406 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4407 struct ethtool_rxnfc *nfc)
4409 struct hclge_vport *vport = hclge_get_vport(handle);
4414 switch (nfc->flow_type) {
4416 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4419 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4422 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4425 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4428 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4431 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4435 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4444 if (tuple_sets & HCLGE_D_PORT_BIT)
4445 nfc->data |= RXH_L4_B_2_3;
4446 if (tuple_sets & HCLGE_S_PORT_BIT)
4447 nfc->data |= RXH_L4_B_0_1;
4448 if (tuple_sets & HCLGE_D_IP_BIT)
4449 nfc->data |= RXH_IP_DST;
4450 if (tuple_sets & HCLGE_S_IP_BIT)
4451 nfc->data |= RXH_IP_SRC;
4456 static int hclge_get_tc_size(struct hnae3_handle *handle)
4458 struct hclge_vport *vport = hclge_get_vport(handle);
4459 struct hclge_dev *hdev = vport->back;
4461 return hdev->rss_size_max;
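/* hclge_rss_init_hw: push vport 0's cached RSS state (indirection table,
 * hash algorithm and key, input tuple, per-TC mode) to hardware. The
 * tc_size written to hardware is log2 of rss_size rounded up to a power
 * of two; the actual queue size is bounded by the indirection table.
 */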
4464 int hclge_rss_init_hw(struct hclge_dev *hdev)
4466 struct hclge_vport *vport = hdev->vport;
4467 u8 *rss_indir = vport[0].rss_indirection_tbl;
4468 u16 rss_size = vport[0].alloc_rss_size;
4469 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4470 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4471 u8 *key = vport[0].rss_hash_key;
4472 u8 hfunc = vport[0].rss_algo;
4473 u16 tc_valid[HCLGE_MAX_TC_NUM];
4478 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4482 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4486 ret = hclge_set_rss_input_tuple(hdev);
4490 /* Each TC has the same queue size, and the tc_size set to hardware is
4491 * the log2 of the roundup power of two of rss_size; the actual queue
4492 * size is limited by the indirection table.
4493 */
4494 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4495 dev_err(&hdev->pdev->dev,
4496 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4501 roundup_size = roundup_pow_of_two(rss_size);
4502 roundup_size = ilog2(roundup_size);
4504 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4507 if (!(hdev->hw_tc_map & BIT(i)))
4511 tc_size[i] = roundup_size;
4512 tc_offset[i] = rss_size * i;
4515 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
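/* Worked example for the tc_size computation above: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so each enabled TC is
 * told to span 2^5 = 32 queue slots while the indirection table still
 * only spreads packets over the 24 real RSS queues.
 */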
4518 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4520 struct hclge_vport *vport = hdev->vport;
4523 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4524 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4525 vport[j].rss_indirection_tbl[i] =
4526 i % vport[j].alloc_rss_size;
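/* The default table built above maps indirection entry i to queue
 * (i % alloc_rss_size), spreading flows evenly over each vport's RSS
 * queues.
 */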
4530 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4532 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4533 struct hclge_vport *vport = hdev->vport;
4535 if (hdev->pdev->revision >= 0x21)
4536 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4538 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4539 vport[i].rss_tuple_sets.ipv4_tcp_en =
4540 HCLGE_RSS_INPUT_TUPLE_OTHER;
4541 vport[i].rss_tuple_sets.ipv4_udp_en =
4542 HCLGE_RSS_INPUT_TUPLE_OTHER;
4543 vport[i].rss_tuple_sets.ipv4_sctp_en =
4544 HCLGE_RSS_INPUT_TUPLE_SCTP;
4545 vport[i].rss_tuple_sets.ipv4_fragment_en =
4546 HCLGE_RSS_INPUT_TUPLE_OTHER;
4547 vport[i].rss_tuple_sets.ipv6_tcp_en =
4548 HCLGE_RSS_INPUT_TUPLE_OTHER;
4549 vport[i].rss_tuple_sets.ipv6_udp_en =
4550 HCLGE_RSS_INPUT_TUPLE_OTHER;
4551 vport[i].rss_tuple_sets.ipv6_sctp_en =
4552 HCLGE_RSS_INPUT_TUPLE_SCTP;
4553 vport[i].rss_tuple_sets.ipv6_fragment_en =
4554 HCLGE_RSS_INPUT_TUPLE_OTHER;
4556 vport[i].rss_algo = rss_algo;
4558 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4559 HCLGE_RSS_KEY_SIZE);
4562 hclge_rss_indir_init_cfg(hdev);
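/* hclge_bind_ring_with_vector() packs up to HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring/type/GL-index tuples into one command descriptor; when a descriptor
 * fills up it is sent and a fresh one is prepared, so a ring chain of any
 * length is mapped to a vector with a minimal number of firmware commands.
 */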
4565 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4566 int vector_id, bool en,
4567 struct hnae3_ring_chain_node *ring_chain)
4569 struct hclge_dev *hdev = vport->back;
4570 struct hnae3_ring_chain_node *node;
4571 struct hclge_desc desc;
4572 struct hclge_ctrl_vector_chain_cmd *req =
4573 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4574 enum hclge_cmd_status status;
4575 enum hclge_opcode_type op;
4576 u16 tqp_type_and_id;
4579 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4580 hclge_cmd_setup_basic_desc(&desc, op, false);
4581 req->int_vector_id = vector_id;
4584 for (node = ring_chain; node; node = node->next) {
4585 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4586 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4588 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4589 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4590 HCLGE_TQP_ID_S, node->tqp_index);
4591 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4593 hnae3_get_field(node->int_gl_idx,
4594 HNAE3_RING_GL_IDX_M,
4595 HNAE3_RING_GL_IDX_S));
4596 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4597 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4598 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4599 req->vfid = vport->vport_id;
4601 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4603 dev_err(&hdev->pdev->dev,
4604 "Map TQP fail, status is %d.\n",
4610 hclge_cmd_setup_basic_desc(&desc,
4613 req->int_vector_id = vector_id;
4618 req->int_cause_num = i;
4619 req->vfid = vport->vport_id;
4620 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4622 dev_err(&hdev->pdev->dev,
4623 "Map TQP fail, status is %d.\n", status);
4631 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4632 struct hnae3_ring_chain_node *ring_chain)
4634 struct hclge_vport *vport = hclge_get_vport(handle);
4635 struct hclge_dev *hdev = vport->back;
4638 vector_id = hclge_get_vector_index(hdev, vector);
4639 if (vector_id < 0) {
4640 dev_err(&hdev->pdev->dev,
4641 "Get vector index fail. vector_id =%d\n", vector_id);
4645 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4648 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4649 struct hnae3_ring_chain_node *ring_chain)
4651 struct hclge_vport *vport = hclge_get_vport(handle);
4652 struct hclge_dev *hdev = vport->back;
4655 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4658 vector_id = hclge_get_vector_index(hdev, vector);
4659 if (vector_id < 0) {
4660 dev_err(&handle->pdev->dev,
4661 "Get vector index fail. ret =%d\n", vector_id);
4665 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4667 dev_err(&handle->pdev->dev,
4668 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4674 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4675 struct hclge_promisc_param *param)
4677 struct hclge_promisc_cfg_cmd *req;
4678 struct hclge_desc desc;
4681 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4683 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4684 req->vf_id = param->vf_id;
4686 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4687 * pdev revision 0x20; newer revisions support them. Setting these two
4688 * fields does not cause the firmware to return an error on
4689 * revision 0x20.
4690 */
4691 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4692 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4694 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4696 dev_err(&hdev->pdev->dev,
4697 "Set promisc mode fail, status is %d.\n", ret);
4702 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4703 bool en_uc, bool en_mc, bool en_bc,
4709 memset(param, 0, sizeof(struct hclge_promisc_param));
4711 param->enable = HCLGE_PROMISC_EN_UC;
4713 param->enable |= HCLGE_PROMISC_EN_MC;
4715 param->enable |= HCLGE_PROMISC_EN_BC;
4716 param->vf_id = vport_id;
4719 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4720 bool en_mc_pmc, bool en_bc_pmc)
4722 struct hclge_dev *hdev = vport->back;
4723 struct hclge_promisc_param param;
4725 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4727 return hclge_cmd_set_promisc_mode(hdev, &param);
4730 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4733 struct hclge_vport *vport = hclge_get_vport(handle);
4734 bool en_bc_pmc = true;
4736 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4737 * is always bypassed. So broadcast promisc should be disabled until
4738 * the user enables promisc mode.
4739 */
4740 if (handle->pdev->revision == 0x20)
4741 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4743 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4747 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4749 struct hclge_get_fd_mode_cmd *req;
4750 struct hclge_desc desc;
4753 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4755 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4757 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4759 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4763 *fd_mode = req->mode;
4768 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4769 u32 *stage1_entry_num,
4770 u32 *stage2_entry_num,
4771 u16 *stage1_counter_num,
4772 u16 *stage2_counter_num)
4774 struct hclge_get_fd_allocation_cmd *req;
4775 struct hclge_desc desc;
4778 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4780 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4782 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4784 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4789 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4790 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4791 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4792 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4797 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4799 struct hclge_set_fd_key_config_cmd *req;
4800 struct hclge_fd_key_cfg *stage;
4801 struct hclge_desc desc;
4804 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4806 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4807 stage = &hdev->fd_cfg.key_cfg[stage_num];
4808 req->stage = stage_num;
4809 req->key_select = stage->key_sel;
4810 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4811 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4812 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4813 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4814 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4815 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4817 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4819 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4824 static int hclge_init_fd_config(struct hclge_dev *hdev)
4826 #define LOW_2_WORDS 0x03
4827 struct hclge_fd_key_cfg *key_cfg;
4830 if (!hnae3_dev_fd_supported(hdev))
4833 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4837 switch (hdev->fd_cfg.fd_mode) {
4838 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4839 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4841 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4842 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4845 dev_err(&hdev->pdev->dev,
4846 "Unsupported flow director mode %u\n",
4847 hdev->fd_cfg.fd_mode);
4851 hdev->fd_cfg.proto_support =
4852 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4853 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4854 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4855 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4856 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4857 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4858 key_cfg->outer_sipv6_word_en = 0;
4859 key_cfg->outer_dipv6_word_en = 0;
4861 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4862 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4863 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4864 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4866 /* If the max 400-bit key is in use, we can also support tuples for ether type */
4867 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4868 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4869 key_cfg->tuple_active |=
4870 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4873 /* roce_type is used to filter roce frames,
4874 * dst_vport is used to specify the vport a rule applies to
4875 */
4876 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4878 ret = hclge_get_fd_allocation(hdev,
4879 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4880 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4881 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4882 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4886 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
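/* A flow director TCAM entry is wider than a single command descriptor,
 * so hclge_fd_tcam_config() below spreads the key bytes over three
 * descriptors chained with HCLGE_CMD_FLAG_NEXT and writes either the X or
 * the Y half of the entry depending on sel_x.
 */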
4889 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4890 int loc, u8 *key, bool is_add)
4892 struct hclge_fd_tcam_config_1_cmd *req1;
4893 struct hclge_fd_tcam_config_2_cmd *req2;
4894 struct hclge_fd_tcam_config_3_cmd *req3;
4895 struct hclge_desc desc[3];
4898 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4899 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4900 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4901 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4902 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4904 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4905 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4906 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4908 req1->stage = stage;
4909 req1->xy_sel = sel_x ? 1 : 0;
4910 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4911 req1->index = cpu_to_le32(loc);
4912 req1->entry_vld = sel_x ? is_add : 0;
4915 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4916 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4917 sizeof(req2->tcam_data));
4918 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4919 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4922 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4924 dev_err(&hdev->pdev->dev,
4925 "config tcam key fail, ret=%d\n",
4931 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4932 struct hclge_fd_ad_data *action)
4934 struct hclge_fd_ad_config_cmd *req;
4935 struct hclge_desc desc;
4939 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4941 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4942 req->index = cpu_to_le32(loc);
4945 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4946 action->write_rule_id_to_bd);
4947 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4950 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4951 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4952 action->forward_to_direct_queue);
4953 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4955 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4956 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4957 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4958 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4959 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4960 action->counter_id);
4962 req->ad_data = cpu_to_le64(ad_data);
4963 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4965 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
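/* The TCAM uses the common X/Y bit encoding: for every key bit, the
 * (x, y) pair derived from a tuple value and its mask by the calc_x()/
 * calc_y() helpers distinguishes "match 0", "match 1" and "don't care",
 * which is how masked-off tuple bits match any packet in
 * hclge_fd_convert_tuple() below.
 */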
4970 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4971 struct hclge_fd_rule *rule)
4973 u16 tmp_x_s, tmp_y_s;
4974 u32 tmp_x_l, tmp_y_l;
4977 if (rule->unused_tuple & tuple_bit)
4980 switch (tuple_bit) {
4983 case BIT(INNER_DST_MAC):
4984 for (i = 0; i < ETH_ALEN; i++) {
4985 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4986 rule->tuples_mask.dst_mac[i]);
4987 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4988 rule->tuples_mask.dst_mac[i]);
4992 case BIT(INNER_SRC_MAC):
4993 for (i = 0; i < ETH_ALEN; i++) {
4994 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4995 rule->tuples_mask.src_mac[i]);
4996 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4997 rule->tuples_mask.src_mac[i]);
5001 case BIT(INNER_VLAN_TAG_FST):
5002 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5003 rule->tuples_mask.vlan_tag1);
5004 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5005 rule->tuples_mask.vlan_tag1);
5006 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5007 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5010 case BIT(INNER_ETH_TYPE):
5011 calc_x(tmp_x_s, rule->tuples.ether_proto,
5012 rule->tuples_mask.ether_proto);
5013 calc_y(tmp_y_s, rule->tuples.ether_proto,
5014 rule->tuples_mask.ether_proto);
5015 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5016 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5019 case BIT(INNER_IP_TOS):
5020 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5021 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5024 case BIT(INNER_IP_PROTO):
5025 calc_x(*key_x, rule->tuples.ip_proto,
5026 rule->tuples_mask.ip_proto);
5027 calc_y(*key_y, rule->tuples.ip_proto,
5028 rule->tuples_mask.ip_proto);
5031 case BIT(INNER_SRC_IP):
5032 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5033 rule->tuples_mask.src_ip[IPV4_INDEX]);
5034 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5035 rule->tuples_mask.src_ip[IPV4_INDEX]);
5036 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5037 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5040 case BIT(INNER_DST_IP):
5041 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5042 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5043 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5044 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5045 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5046 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5049 case BIT(INNER_SRC_PORT):
5050 calc_x(tmp_x_s, rule->tuples.src_port,
5051 rule->tuples_mask.src_port);
5052 calc_y(tmp_y_s, rule->tuples.src_port,
5053 rule->tuples_mask.src_port);
5054 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5055 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5058 case BIT(INNER_DST_PORT):
5059 calc_x(tmp_x_s, rule->tuples.dst_port,
5060 rule->tuples_mask.dst_port);
5061 calc_y(tmp_y_s, rule->tuples.dst_port,
5062 rule->tuples_mask.dst_port);
5063 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5064 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5072 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5073 u8 vf_id, u8 network_port_id)
5075 u32 port_number = 0;
5077 if (port_type == HOST_PORT) {
5078 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5080 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5082 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5084 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5085 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5086 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5092 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5093 __le32 *key_x, __le32 *key_y,
5094 struct hclge_fd_rule *rule)
5096 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5097 u8 cur_pos = 0, tuple_size, shift_bits;
5100 for (i = 0; i < MAX_META_DATA; i++) {
5101 tuple_size = meta_data_key_info[i].key_length;
5102 tuple_bit = key_cfg->meta_data_active & BIT(i);
5104 switch (tuple_bit) {
5105 case BIT(ROCE_TYPE):
5106 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5107 cur_pos += tuple_size;
5109 case BIT(DST_VPORT):
5110 port_number = hclge_get_port_number(HOST_PORT, 0,
5112 hnae3_set_field(meta_data,
5113 GENMASK(cur_pos + tuple_size, cur_pos),
5114 cur_pos, port_number);
5115 cur_pos += tuple_size;
5122 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5123 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5124 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5126 *key_x = cpu_to_le32(tmp_x << shift_bits);
5127 *key_y = cpu_to_le32(tmp_y << shift_bits);
5130 /* A complete key consists of a meta data key and a tuple key.
5131 * The meta data key is stored in the MSB region, and the tuple key is
5132 * stored in the LSB region; unused bits are filled with 0.
5133 */
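/* Illustrative sketch of the resulting key_x/key_y buffers (region sizes
 * depend on the fd_mode negotiated above):
 *
 *   MSB                                             LSB
 *   +------------------+-------------------------------+
 *   |  meta data key   |           tuple key           |
 *   +------------------+-------------------------------+
 *   MAX_META_DATA_LENGTH  (max_key_length - MAX_META_DATA_LENGTH) bits
 */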
5134 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5135 struct hclge_fd_rule *rule)
5137 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5138 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5139 u8 *cur_key_x, *cur_key_y;
5141 int ret, tuple_size;
5142 u8 meta_data_region;
5144 memset(key_x, 0, sizeof(key_x));
5145 memset(key_y, 0, sizeof(key_y));
5149 for (i = 0; i < MAX_TUPLE; i++) {
5153 tuple_size = tuple_key_info[i].key_length / 8;
5154 check_tuple = key_cfg->tuple_active & BIT(i);
5156 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5159 cur_key_x += tuple_size;
5160 cur_key_y += tuple_size;
5164 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5165 MAX_META_DATA_LENGTH / 8;
5167 hclge_fd_convert_meta_data(key_cfg,
5168 (__le32 *)(key_x + meta_data_region),
5169 (__le32 *)(key_y + meta_data_region),
5172 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5175 dev_err(&hdev->pdev->dev,
5176 "fd key_y config fail, loc=%u, ret=%d\n",
5177 rule->location, ret);
5181 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5184 dev_err(&hdev->pdev->dev,
5185 "fd key_x config fail, loc=%u, ret=%d\n",
5186 rule->location, ret);
5190 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5191 struct hclge_fd_rule *rule)
5193 struct hclge_fd_ad_data ad_data;
5195 ad_data.ad_id = rule->location;
5197 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5198 ad_data.drop_packet = true;
5199 ad_data.forward_to_direct_queue = false;
5200 ad_data.queue_id = 0;
5202 ad_data.drop_packet = false;
5203 ad_data.forward_to_direct_queue = true;
5204 ad_data.queue_id = rule->queue_id;
5207 ad_data.use_counter = false;
5208 ad_data.counter_id = 0;
5210 ad_data.use_next_stage = false;
5211 ad_data.next_input_key = 0;
5213 ad_data.write_rule_id_to_bd = true;
5214 ad_data.rule_id = rule->location;
5216 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
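/* The stage-1 rules built above are driven from ethtool. Illustrative
 * userspace usage (hypothetical interface name and values):
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 \
 *           action 3 loc 1
 * steers matching packets to queue 3 using rule location 1, while
 * "action -1" requests HCLGE_FD_ACTION_DROP_PACKET.
 */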
5219 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5220 struct ethtool_rx_flow_spec *fs, u32 *unused)
5222 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5223 struct ethtool_usrip4_spec *usr_ip4_spec;
5224 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5225 struct ethtool_usrip6_spec *usr_ip6_spec;
5226 struct ethhdr *ether_spec;
5228 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5231 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5234 if ((fs->flow_type & FLOW_EXT) &&
5235 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5236 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5240 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5244 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5245 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5247 if (!tcp_ip4_spec->ip4src)
5248 *unused |= BIT(INNER_SRC_IP);
5250 if (!tcp_ip4_spec->ip4dst)
5251 *unused |= BIT(INNER_DST_IP);
5253 if (!tcp_ip4_spec->psrc)
5254 *unused |= BIT(INNER_SRC_PORT);
5256 if (!tcp_ip4_spec->pdst)
5257 *unused |= BIT(INNER_DST_PORT);
5259 if (!tcp_ip4_spec->tos)
5260 *unused |= BIT(INNER_IP_TOS);
5264 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5265 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5266 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5268 if (!usr_ip4_spec->ip4src)
5269 *unused |= BIT(INNER_SRC_IP);
5271 if (!usr_ip4_spec->ip4dst)
5272 *unused |= BIT(INNER_DST_IP);
5274 if (!usr_ip4_spec->tos)
5275 *unused |= BIT(INNER_IP_TOS);
5277 if (!usr_ip4_spec->proto)
5278 *unused |= BIT(INNER_IP_PROTO);
5280 if (usr_ip4_spec->l4_4_bytes)
5283 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5290 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5291 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5294 /* check whether the src/dst ip address is used */
5295 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5296 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5297 *unused |= BIT(INNER_SRC_IP);
5299 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5300 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5301 *unused |= BIT(INNER_DST_IP);
5303 if (!tcp_ip6_spec->psrc)
5304 *unused |= BIT(INNER_SRC_PORT);
5306 if (!tcp_ip6_spec->pdst)
5307 *unused |= BIT(INNER_DST_PORT);
5309 if (tcp_ip6_spec->tclass)
5313 case IPV6_USER_FLOW:
5314 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5315 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5316 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5317 BIT(INNER_DST_PORT);
5319 /* check whether the src/dst ip address is used */
5320 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5321 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5322 *unused |= BIT(INNER_SRC_IP);
5324 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5325 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5326 *unused |= BIT(INNER_DST_IP);
5328 if (!usr_ip6_spec->l4_proto)
5329 *unused |= BIT(INNER_IP_PROTO);
5331 if (usr_ip6_spec->tclass)
5334 if (usr_ip6_spec->l4_4_bytes)
5339 ether_spec = &fs->h_u.ether_spec;
5340 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5341 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5342 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5344 if (is_zero_ether_addr(ether_spec->h_source))
5345 *unused |= BIT(INNER_SRC_MAC);
5347 if (is_zero_ether_addr(ether_spec->h_dest))
5348 *unused |= BIT(INNER_DST_MAC);
5350 if (!ether_spec->h_proto)
5351 *unused |= BIT(INNER_ETH_TYPE);
5358 if ((fs->flow_type & FLOW_EXT)) {
5359 if (fs->h_ext.vlan_etype)
5361 if (!fs->h_ext.vlan_tci)
5362 *unused |= BIT(INNER_VLAN_TAG_FST);
5364 if (fs->m_ext.vlan_tci) {
5365 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5369 *unused |= BIT(INNER_VLAN_TAG_FST);
5372 if (fs->flow_type & FLOW_MAC_EXT) {
5373 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5376 if (is_zero_ether_addr(fs->h_ext.h_dest))
5377 *unused |= BIT(INNER_DST_MAC);
5379 *unused &= ~(BIT(INNER_DST_MAC));
5385 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5387 struct hclge_fd_rule *rule = NULL;
5388 struct hlist_node *node2;
5390 spin_lock_bh(&hdev->fd_rule_lock);
5391 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5392 if (rule->location >= location)
5396 spin_unlock_bh(&hdev->fd_rule_lock);
5398 return rule && rule->location == location;
5401 /* the caller must hold fd_rule_lock */
5402 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5403 struct hclge_fd_rule *new_rule,
5407 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5408 struct hlist_node *node2;
5410 if (is_add && !new_rule)
5413 hlist_for_each_entry_safe(rule, node2,
5414 &hdev->fd_rule_list, rule_node) {
5415 if (rule->location >= location)
5420 if (rule && rule->location == location) {
5421 hlist_del(&rule->rule_node);
5423 hdev->hclge_fd_rule_num--;
5426 if (!hdev->hclge_fd_rule_num)
5427 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5428 clear_bit(location, hdev->fd_bmap);
5432 } else if (!is_add) {
5433 dev_err(&hdev->pdev->dev,
5434 "delete fail, rule %u is inexistent\n",
5439 INIT_HLIST_NODE(&new_rule->rule_node);
5442 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5444 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5446 set_bit(location, hdev->fd_bmap);
5447 hdev->hclge_fd_rule_num++;
5448 hdev->fd_active_type = new_rule->rule_type;
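/* The walk above keeps the rule list sorted by location: the last node
 * with a smaller location is remembered as "parent" so the new rule is
 * linked behind it (or at the head when none exists), which lets lookups
 * stop as soon as rule->location passes the target.
 */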
5453 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5454 struct ethtool_rx_flow_spec *fs,
5455 struct hclge_fd_rule *rule)
5457 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5459 switch (flow_type) {
5463 rule->tuples.src_ip[IPV4_INDEX] =
5464 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5465 rule->tuples_mask.src_ip[IPV4_INDEX] =
5466 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5468 rule->tuples.dst_ip[IPV4_INDEX] =
5469 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5470 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5471 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5473 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5474 rule->tuples_mask.src_port =
5475 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5477 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5478 rule->tuples_mask.dst_port =
5479 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5481 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5482 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5484 rule->tuples.ether_proto = ETH_P_IP;
5485 rule->tuples_mask.ether_proto = 0xFFFF;
5489 rule->tuples.src_ip[IPV4_INDEX] =
5490 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5491 rule->tuples_mask.src_ip[IPV4_INDEX] =
5492 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5494 rule->tuples.dst_ip[IPV4_INDEX] =
5495 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5496 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5497 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5499 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5500 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5502 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5503 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5505 rule->tuples.ether_proto = ETH_P_IP;
5506 rule->tuples_mask.ether_proto = 0xFFFF;
5512 be32_to_cpu_array(rule->tuples.src_ip,
5513 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5514 be32_to_cpu_array(rule->tuples_mask.src_ip,
5515 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5517 be32_to_cpu_array(rule->tuples.dst_ip,
5518 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5519 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5520 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5522 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5523 rule->tuples_mask.src_port =
5524 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5526 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5527 rule->tuples_mask.dst_port =
5528 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5530 rule->tuples.ether_proto = ETH_P_IPV6;
5531 rule->tuples_mask.ether_proto = 0xFFFF;
5534 case IPV6_USER_FLOW:
5535 be32_to_cpu_array(rule->tuples.src_ip,
5536 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5537 be32_to_cpu_array(rule->tuples_mask.src_ip,
5538 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5540 be32_to_cpu_array(rule->tuples.dst_ip,
5541 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5542 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5543 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5545 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5546 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5548 rule->tuples.ether_proto = ETH_P_IPV6;
5549 rule->tuples_mask.ether_proto = 0xFFFF;
5553 ether_addr_copy(rule->tuples.src_mac,
5554 fs->h_u.ether_spec.h_source);
5555 ether_addr_copy(rule->tuples_mask.src_mac,
5556 fs->m_u.ether_spec.h_source);
5558 ether_addr_copy(rule->tuples.dst_mac,
5559 fs->h_u.ether_spec.h_dest);
5560 ether_addr_copy(rule->tuples_mask.dst_mac,
5561 fs->m_u.ether_spec.h_dest);
5563 rule->tuples.ether_proto =
5564 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5565 rule->tuples_mask.ether_proto =
5566 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5573 switch (flow_type) {
5576 rule->tuples.ip_proto = IPPROTO_SCTP;
5577 rule->tuples_mask.ip_proto = 0xFF;
5581 rule->tuples.ip_proto = IPPROTO_TCP;
5582 rule->tuples_mask.ip_proto = 0xFF;
5586 rule->tuples.ip_proto = IPPROTO_UDP;
5587 rule->tuples_mask.ip_proto = 0xFF;
5593 if ((fs->flow_type & FLOW_EXT)) {
5594 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5595 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5598 if (fs->flow_type & FLOW_MAC_EXT) {
5599 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5600 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5606 /* the caller must hold fd_rule_lock */
5607 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5608 struct hclge_fd_rule *rule)
5613 dev_err(&hdev->pdev->dev,
5614 "The flow director rule is NULL\n");
5618 /* it never fails here, so there is no need to check the return value */
5619 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5621 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5625 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5632 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5636 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5637 struct ethtool_rxnfc *cmd)
5639 struct hclge_vport *vport = hclge_get_vport(handle);
5640 struct hclge_dev *hdev = vport->back;
5641 u16 dst_vport_id = 0, q_index = 0;
5642 struct ethtool_rx_flow_spec *fs;
5643 struct hclge_fd_rule *rule;
5648 if (!hnae3_dev_fd_supported(hdev))
5652 dev_warn(&hdev->pdev->dev,
5653 "Please enable flow director first\n");
5657 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5659 ret = hclge_fd_check_spec(hdev, fs, &unused);
5661 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5665 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5666 action = HCLGE_FD_ACTION_DROP_PACKET;
5668 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5669 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5672 if (vf > hdev->num_req_vfs) {
5673 dev_err(&hdev->pdev->dev,
5674 "Error: vf id (%u) > max vf num (%u)\n",
5675 vf, hdev->num_req_vfs);
5679 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5680 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5683 dev_err(&hdev->pdev->dev,
5684 "Error: queue id (%u) > max tqp num (%u)\n",
5689 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5693 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5697 ret = hclge_fd_get_tuple(hdev, fs, rule);
5703 rule->flow_type = fs->flow_type;
5705 rule->location = fs->location;
5706 rule->unused_tuple = unused;
5707 rule->vf_id = dst_vport_id;
5708 rule->queue_id = q_index;
5709 rule->action = action;
5710 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5712 /* to avoid rule conflict, when the user configures rules via ethtool,
5713 * we need to clear all aRFS rules first
5714 */
5715 hclge_clear_arfs_rules(handle);
5717 spin_lock_bh(&hdev->fd_rule_lock);
5718 ret = hclge_fd_config_rule(hdev, rule);
5720 spin_unlock_bh(&hdev->fd_rule_lock);
5725 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5726 struct ethtool_rxnfc *cmd)
5728 struct hclge_vport *vport = hclge_get_vport(handle);
5729 struct hclge_dev *hdev = vport->back;
5730 struct ethtool_rx_flow_spec *fs;
5733 if (!hnae3_dev_fd_supported(hdev))
5736 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5738 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5741 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5742 dev_err(&hdev->pdev->dev,
5743 "Delete fail, rule %d is inexistent\n", fs->location);
5747 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5752 spin_lock_bh(&hdev->fd_rule_lock);
5753 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5755 spin_unlock_bh(&hdev->fd_rule_lock);
5760 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5763 struct hclge_vport *vport = hclge_get_vport(handle);
5764 struct hclge_dev *hdev = vport->back;
5765 struct hclge_fd_rule *rule;
5766 struct hlist_node *node;
5769 if (!hnae3_dev_fd_supported(hdev))
5772 spin_lock_bh(&hdev->fd_rule_lock);
5773 for_each_set_bit(location, hdev->fd_bmap,
5774 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5775 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5779 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5781 hlist_del(&rule->rule_node);
5784 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5785 hdev->hclge_fd_rule_num = 0;
5786 bitmap_zero(hdev->fd_bmap,
5787 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5790 spin_unlock_bh(&hdev->fd_rule_lock);
5793 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5795 struct hclge_vport *vport = hclge_get_vport(handle);
5796 struct hclge_dev *hdev = vport->back;
5797 struct hclge_fd_rule *rule;
5798 struct hlist_node *node;
5801 /* Return ok here, because reset error handling will check this
5802 * return value. If an error is returned here, the reset process will
5803 * fail.
5804 */
5805 if (!hnae3_dev_fd_supported(hdev))
5808 /* if fd is disabled, it should not be restored during reset */
5812 spin_lock_bh(&hdev->fd_rule_lock);
5813 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5814 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5816 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5819 dev_warn(&hdev->pdev->dev,
5820 "Restore rule %u failed, remove it\n",
5822 clear_bit(rule->location, hdev->fd_bmap);
5823 hlist_del(&rule->rule_node);
5825 hdev->hclge_fd_rule_num--;
5829 if (hdev->hclge_fd_rule_num)
5830 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5832 spin_unlock_bh(&hdev->fd_rule_lock);
5837 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5838 struct ethtool_rxnfc *cmd)
5840 struct hclge_vport *vport = hclge_get_vport(handle);
5841 struct hclge_dev *hdev = vport->back;
5843 if (!hnae3_dev_fd_supported(hdev))
5846 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5847 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5852 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5853 struct ethtool_rxnfc *cmd)
5855 struct hclge_vport *vport = hclge_get_vport(handle);
5856 struct hclge_fd_rule *rule = NULL;
5857 struct hclge_dev *hdev = vport->back;
5858 struct ethtool_rx_flow_spec *fs;
5859 struct hlist_node *node2;
5861 if (!hnae3_dev_fd_supported(hdev))
5864 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5866 spin_lock_bh(&hdev->fd_rule_lock);
5868 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5869 if (rule->location >= fs->location)
5873 if (!rule || fs->location != rule->location) {
5874 spin_unlock_bh(&hdev->fd_rule_lock);
5879 fs->flow_type = rule->flow_type;
5880 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5884 fs->h_u.tcp_ip4_spec.ip4src =
5885 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5886 fs->m_u.tcp_ip4_spec.ip4src =
5887 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5888 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5890 fs->h_u.tcp_ip4_spec.ip4dst =
5891 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5892 fs->m_u.tcp_ip4_spec.ip4dst =
5893 rule->unused_tuple & BIT(INNER_DST_IP) ?
5894 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5896 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5897 fs->m_u.tcp_ip4_spec.psrc =
5898 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5899 0 : cpu_to_be16(rule->tuples_mask.src_port);
5901 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5902 fs->m_u.tcp_ip4_spec.pdst =
5903 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5904 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5906 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5907 fs->m_u.tcp_ip4_spec.tos =
5908 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5909 0 : rule->tuples_mask.ip_tos;
5913 fs->h_u.usr_ip4_spec.ip4src =
5914 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5915 fs->m_u.usr_ip4_spec.ip4src =
5916 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5917 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5919 fs->h_u.usr_ip4_spec.ip4dst =
5920 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5921 fs->m_u.usr_ip4_spec.ip4dst =
5922 rule->unused_tuple & BIT(INNER_DST_IP) ?
5923 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5925 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5926 fs->m_u.usr_ip4_spec.tos =
5927 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5928 0 : rule->tuples_mask.ip_tos;
5930 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5931 fs->m_u.usr_ip4_spec.proto =
5932 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5933 0 : rule->tuples_mask.ip_proto;
5935 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5941 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5942 rule->tuples.src_ip, IPV6_SIZE);
5943 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5944 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5945 sizeof(int) * IPV6_SIZE);
5947 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5948 rule->tuples_mask.src_ip, IPV6_SIZE);
5950 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5951 rule->tuples.dst_ip, IPV6_SIZE);
5952 if (rule->unused_tuple & BIT(INNER_DST_IP))
5953 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5954 sizeof(int) * IPV6_SIZE);
5956 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5957 rule->tuples_mask.dst_ip, IPV6_SIZE);
5959 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5960 fs->m_u.tcp_ip6_spec.psrc =
5961 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5962 0 : cpu_to_be16(rule->tuples_mask.src_port);
5964 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5965 fs->m_u.tcp_ip6_spec.pdst =
5966 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5967 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5970 case IPV6_USER_FLOW:
5971 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5972 rule->tuples.src_ip, IPV6_SIZE);
5973 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5974 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5975 sizeof(int) * IPV6_SIZE);
5977 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5978 rule->tuples_mask.src_ip, IPV6_SIZE);
5980 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5981 rule->tuples.dst_ip, IPV6_SIZE);
5982 if (rule->unused_tuple & BIT(INNER_DST_IP))
5983 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5984 sizeof(int) * IPV6_SIZE);
5986 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5987 rule->tuples_mask.dst_ip, IPV6_SIZE);
5989 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5990 fs->m_u.usr_ip6_spec.l4_proto =
5991 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5992 0 : rule->tuples_mask.ip_proto;
5996 ether_addr_copy(fs->h_u.ether_spec.h_source,
5997 rule->tuples.src_mac);
5998 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5999 eth_zero_addr(fs->m_u.ether_spec.h_source);
6001 ether_addr_copy(fs->m_u.ether_spec.h_source,
6002 rule->tuples_mask.src_mac);
6004 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6005 rule->tuples.dst_mac);
6006 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6007 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6009 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6010 rule->tuples_mask.dst_mac);
6012 fs->h_u.ether_spec.h_proto =
6013 cpu_to_be16(rule->tuples.ether_proto);
6014 fs->m_u.ether_spec.h_proto =
6015 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6016 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6020 spin_unlock_bh(&hdev->fd_rule_lock);
6024 if (fs->flow_type & FLOW_EXT) {
6025 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6026 fs->m_ext.vlan_tci =
6027 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6028 cpu_to_be16(VLAN_VID_MASK) :
6029 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6032 if (fs->flow_type & FLOW_MAC_EXT) {
6033 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6034 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6035 eth_zero_addr(fs->m_ext.h_dest);
6037 ether_addr_copy(fs->m_ext.h_dest,
6038 rule->tuples_mask.dst_mac);
6041 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6042 fs->ring_cookie = RX_CLS_FLOW_DISC;
6046 fs->ring_cookie = rule->queue_id;
6047 vf_id = rule->vf_id;
6048 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6049 fs->ring_cookie |= vf_id;
6052 spin_unlock_bh(&hdev->fd_rule_lock);
6057 static int hclge_get_all_rules(struct hnae3_handle *handle,
6058 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6060 struct hclge_vport *vport = hclge_get_vport(handle);
6061 struct hclge_dev *hdev = vport->back;
6062 struct hclge_fd_rule *rule;
6063 struct hlist_node *node2;
6066 if (!hnae3_dev_fd_supported(hdev))
6069 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6071 spin_lock_bh(&hdev->fd_rule_lock);
6072 hlist_for_each_entry_safe(rule, node2,
6073 &hdev->fd_rule_list, rule_node) {
6074 if (cnt == cmd->rule_cnt) {
6075 spin_unlock_bh(&hdev->fd_rule_lock);
6079 rule_locs[cnt] = rule->location;
6083 spin_unlock_bh(&hdev->fd_rule_lock);
6085 cmd->rule_cnt = cnt;
6090 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6091 struct hclge_fd_rule_tuples *tuples)
6093 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6094 tuples->ip_proto = fkeys->basic.ip_proto;
6095 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6097 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6098 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6099 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6101 memcpy(tuples->src_ip,
6102 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6103 sizeof(tuples->src_ip));
6104 memcpy(tuples->dst_ip,
6105 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6106 sizeof(tuples->dst_ip));
6110 /* traverse all rules, check whether an existing rule has the same tuples */
6111 static struct hclge_fd_rule *
6112 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6113 const struct hclge_fd_rule_tuples *tuples)
6115 struct hclge_fd_rule *rule = NULL;
6116 struct hlist_node *node;
6118 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6119 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6126 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6127 struct hclge_fd_rule *rule)
6129 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6130 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6131 BIT(INNER_SRC_PORT);
6134 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6135 if (tuples->ether_proto == ETH_P_IP) {
6136 if (tuples->ip_proto == IPPROTO_TCP)
6137 rule->flow_type = TCP_V4_FLOW;
6139 rule->flow_type = UDP_V4_FLOW;
6141 if (tuples->ip_proto == IPPROTO_TCP)
6142 rule->flow_type = TCP_V6_FLOW;
6144 rule->flow_type = UDP_V6_FLOW;
6146 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6147 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
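/* An aRFS rule therefore keys only on ether proto, L4 proto, src/dst IP
 * and the destination port; all other fields are masked out through
 * unused_tuple, and tuples_mask is set to all-ones so that the remaining
 * fields must match exactly.
 */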
6150 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6151 u16 flow_id, struct flow_keys *fkeys)
6153 struct hclge_vport *vport = hclge_get_vport(handle);
6154 struct hclge_fd_rule_tuples new_tuples;
6155 struct hclge_dev *hdev = vport->back;
6156 struct hclge_fd_rule *rule;
6161 if (!hnae3_dev_fd_supported(hdev))
6164 memset(&new_tuples, 0, sizeof(new_tuples));
6165 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6167 spin_lock_bh(&hdev->fd_rule_lock);
6169 /* when there is already an fd rule added by the user,
6170 * arfs should not work
6171 */
6172 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6173 spin_unlock_bh(&hdev->fd_rule_lock);
6178 /* check whether a flow director filter already exists for this flow:
6179 * if not, create a new filter for it;
6180 * if a filter exists with a different queue id, modify the filter;
6181 * if a filter exists with the same queue id, do nothing
6182 */
6183 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6185 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6186 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6187 spin_unlock_bh(&hdev->fd_rule_lock);
6192 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6194 spin_unlock_bh(&hdev->fd_rule_lock);
6199 set_bit(bit_id, hdev->fd_bmap);
6200 rule->location = bit_id;
6201 rule->flow_id = flow_id;
6202 rule->queue_id = queue_id;
6203 hclge_fd_build_arfs_rule(&new_tuples, rule);
6204 ret = hclge_fd_config_rule(hdev, rule);
6206 spin_unlock_bh(&hdev->fd_rule_lock);
6211 return rule->location;
6214 spin_unlock_bh(&hdev->fd_rule_lock);
6216 if (rule->queue_id == queue_id)
6217 return rule->location;
6219 tmp_queue_id = rule->queue_id;
6220 rule->queue_id = queue_id;
6221 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6223 rule->queue_id = tmp_queue_id;
6227 return rule->location;
6230 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6232 #ifdef CONFIG_RFS_ACCEL
6233 struct hnae3_handle *handle = &hdev->vport[0].nic;
6234 struct hclge_fd_rule *rule;
6235 struct hlist_node *node;
6236 HLIST_HEAD(del_list);
6238 spin_lock_bh(&hdev->fd_rule_lock);
6239 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6240 spin_unlock_bh(&hdev->fd_rule_lock);
6243 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6244 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6245 rule->flow_id, rule->location)) {
6246 hlist_del_init(&rule->rule_node);
6247 hlist_add_head(&rule->rule_node, &del_list);
6248 hdev->hclge_fd_rule_num--;
6249 clear_bit(rule->location, hdev->fd_bmap);
6252 spin_unlock_bh(&hdev->fd_rule_lock);
6254 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6255 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6256 rule->location, NULL, false);
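/* Expired rules are unlinked under fd_rule_lock and collected on the
 * private del_list first; the TCAM entries are then invalidated outside
 * the lock so the firmware commands are not issued with the BH spinlock
 * held.
 */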
6262 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6264 #ifdef CONFIG_RFS_ACCEL
6265 struct hclge_vport *vport = hclge_get_vport(handle);
6266 struct hclge_dev *hdev = vport->back;
6268 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6269 hclge_del_all_fd_entries(handle, true);
6273 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6275 struct hclge_vport *vport = hclge_get_vport(handle);
6276 struct hclge_dev *hdev = vport->back;
6278 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6279 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6282 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6284 struct hclge_vport *vport = hclge_get_vport(handle);
6285 struct hclge_dev *hdev = vport->back;
6287 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6290 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6292 struct hclge_vport *vport = hclge_get_vport(handle);
6293 struct hclge_dev *hdev = vport->back;
6295 return hdev->rst_stats.hw_reset_done_cnt;
6298 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6300 struct hclge_vport *vport = hclge_get_vport(handle);
6301 struct hclge_dev *hdev = vport->back;
6304 hdev->fd_en = enable;
6305 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6307 hclge_del_all_fd_entries(handle, clear);
6309 hclge_restore_fd_entries(handle);
6312 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6314 struct hclge_desc desc;
6315 struct hclge_config_mac_mode_cmd *req =
6316 (struct hclge_config_mac_mode_cmd *)desc.data;
6320 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6323 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6324 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6325 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6326 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6327 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6328 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6329 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6330 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6331 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6332 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6335 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6337 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6339 dev_err(&hdev->pdev->dev,
6340 "mac enable fail, ret =%d.\n", ret);
6343 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6344 u8 switch_param, u8 param_mask)
6346 struct hclge_mac_vlan_switch_cmd *req;
6347 struct hclge_desc desc;
6351 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6352 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6353 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6355 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6356 req->func_id = cpu_to_le32(func_id);
6357 req->switch_param = switch_param;
6358 req->param_mask = param_mask;
6360 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6362 dev_err(&hdev->pdev->dev,
6363 "set mac vlan switch parameter fail, ret = %d\n", ret);
6367 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6370 #define HCLGE_PHY_LINK_STATUS_NUM 200
6372 struct phy_device *phydev = hdev->hw.mac.phydev;
6377 ret = phy_read_status(phydev);
6379 dev_err(&hdev->pdev->dev,
6380 "phy update link status fail, ret = %d\n", ret);
6384 if (phydev->link == link_ret)
6387 msleep(HCLGE_LINK_STATUS_MS);
6388 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6391 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6393 #define HCLGE_MAC_LINK_STATUS_NUM 100
6399 ret = hclge_get_mac_link_status(hdev);
6402 else if (ret == link_ret)
6405 msleep(HCLGE_LINK_STATUS_MS);
6406 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6410 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6413 #define HCLGE_LINK_STATUS_DOWN 0
6414 #define HCLGE_LINK_STATUS_UP 1
6418 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6421 hclge_phy_link_status_wait(hdev, link_ret);
6423 return hclge_mac_link_status_wait(hdev, link_ret);
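/* App (MAC) loopback below is configured read-modify-write: the current
 * MAC mode is read first so only the loopback and TX/RX enable bits are
 * changed, and the remaining mode bits are preserved when the descriptor
 * is reused.
 */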
6426 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6428 struct hclge_config_mac_mode_cmd *req;
6429 struct hclge_desc desc;
6433 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6434 /* 1 Read out the MAC mode config at first */
6435 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6436 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6438 dev_err(&hdev->pdev->dev,
6439 "mac loopback get fail, ret =%d.\n", ret);
6443 /* 2 Then setup the loopback flag */
6444 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6445 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6446 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6447 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6449 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6451 /* 3 Config mac work mode with loopback flag
6452 * and its original configure parameters
6454 hclge_cmd_reuse_desc(&desc, false);
6455 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6457 dev_err(&hdev->pdev->dev,
6458 "mac loopback set fail, ret =%d.\n", ret);
6462 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6463 enum hnae3_loop loop_mode)
6465 #define HCLGE_SERDES_RETRY_MS 10
6466 #define HCLGE_SERDES_RETRY_NUM 100
6468 struct hclge_serdes_lb_cmd *req;
6469 struct hclge_desc desc;
6473 req = (struct hclge_serdes_lb_cmd *)desc.data;
6474 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6476 switch (loop_mode) {
6477 case HNAE3_LOOP_SERIAL_SERDES:
6478 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6480 case HNAE3_LOOP_PARALLEL_SERDES:
6481 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6484 dev_err(&hdev->pdev->dev,
6485 "unsupported serdes loopback mode %d\n", loop_mode);
6490 req->enable = loop_mode_b;
6491 req->mask = loop_mode_b;
6493 req->mask = loop_mode_b;
6496 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6498 dev_err(&hdev->pdev->dev,
6499 "serdes loopback set fail, ret = %d\n", ret);
6504 msleep(HCLGE_SERDES_RETRY_MS);
6505 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6507 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6509 dev_err(&hdev->pdev->dev,
6510 "serdes loopback get, ret = %d\n", ret);
6513 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6514 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6516 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6517 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6519 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6520 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
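/* The serdes loopback command completes asynchronously: the loop above
 * re-reads the descriptor up to HCLGE_SERDES_RETRY_NUM times at
 * HCLGE_SERDES_RETRY_MS intervals until the firmware sets the DONE bit,
 * then checks the SUCCESS bit, giving roughly a one second timeout overall.
 */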
6526 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6527 enum hnae3_loop loop_mode)
6531 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6535 hclge_cfg_mac_mode(hdev, en);
6537 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6539 dev_err(&hdev->pdev->dev,
6540 "serdes loopback config mac mode timeout\n");
6545 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6546 struct phy_device *phydev)
6550 if (!phydev->suspended) {
6551 ret = phy_suspend(phydev);
6556 ret = phy_resume(phydev);
6560 return phy_loopback(phydev, true);
6563 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6564 struct phy_device *phydev)
6568 ret = phy_loopback(phydev, false);
6572 return phy_suspend(phydev);
6575 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6577 struct phy_device *phydev = hdev->hw.mac.phydev;
6584 ret = hclge_enable_phy_loopback(hdev, phydev);
6586 ret = hclge_disable_phy_loopback(hdev, phydev);
6588 dev_err(&hdev->pdev->dev,
6589 "set phy loopback fail, ret = %d\n", ret);
6593 hclge_cfg_mac_mode(hdev, en);
6595 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6597 dev_err(&hdev->pdev->dev,
6598 "phy loopback config mac mode timeout\n");
6603 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6604 int stream_id, bool enable)
6606 struct hclge_desc desc;
6607 struct hclge_cfg_com_tqp_queue_cmd *req =
6608 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6611 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6612 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6613 req->stream_id = cpu_to_le16(stream_id);
6615 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6617 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6619 dev_err(&hdev->pdev->dev,
6620 "Tqp enable fail, status =%d.\n", ret);
6624 static int hclge_set_loopback(struct hnae3_handle *handle,
6625 enum hnae3_loop loop_mode, bool en)
6627 struct hclge_vport *vport = hclge_get_vport(handle);
6628 struct hnae3_knic_private_info *kinfo;
6629 struct hclge_dev *hdev = vport->back;
6632 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6633 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6634 * the same, the packets are looped back in the SSU. If SSU loopback
6635 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6637 if (hdev->pdev->revision >= 0x21) {
6638 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6640 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6641 HCLGE_SWITCH_ALW_LPBK_MASK);
6646 switch (loop_mode) {
6647 case HNAE3_LOOP_APP:
6648 ret = hclge_set_app_loopback(hdev, en);
6650 case HNAE3_LOOP_SERIAL_SERDES:
6651 case HNAE3_LOOP_PARALLEL_SERDES:
6652 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6654 case HNAE3_LOOP_PHY:
6655 ret = hclge_set_phy_loopback(hdev, en);
6659 dev_err(&hdev->pdev->dev,
6660 "loop_mode %d is not supported\n", loop_mode);
6667 kinfo = &vport->nic.kinfo;
6668 for (i = 0; i < kinfo->num_tqps; i++) {
6669 ret = hclge_tqp_enable(hdev, i, 0, en);
6677 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6681 ret = hclge_set_app_loopback(hdev, false);
6685 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6689 return hclge_cfg_serdes_loopback(hdev, false,
6690 HNAE3_LOOP_PARALLEL_SERDES);
6693 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6695 struct hclge_vport *vport = hclge_get_vport(handle);
6696 struct hnae3_knic_private_info *kinfo;
6697 struct hnae3_queue *queue;
6698 struct hclge_tqp *tqp;
6701 kinfo = &vport->nic.kinfo;
6702 for (i = 0; i < kinfo->num_tqps; i++) {
6703 queue = handle->kinfo.tqp[i];
6704 tqp = container_of(queue, struct hclge_tqp, q);
6705 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6709 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6711 struct hclge_vport *vport = hclge_get_vport(handle);
6712 struct hclge_dev *hdev = vport->back;
6715 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6717 /* Set the DOWN flag here to disable the service from being scheduled again. */
6720 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6721 cancel_delayed_work_sync(&hdev->service_task);
6722 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6726 static int hclge_ae_start(struct hnae3_handle *handle)
6728 struct hclge_vport *vport = hclge_get_vport(handle);
6729 struct hclge_dev *hdev = vport->back;
6732 hclge_cfg_mac_mode(hdev, true);
6733 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6734 hdev->hw.mac.link = 0;
6736 /* reset tqp stats */
6737 hclge_reset_tqp_stats(handle);
6739 hclge_mac_start_phy(hdev);
6744 static void hclge_ae_stop(struct hnae3_handle *handle)
6746 struct hclge_vport *vport = hclge_get_vport(handle);
6747 struct hclge_dev *hdev = vport->back;
6750 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6752 hclge_clear_arfs_rules(handle);
6754 /* If it is not a PF reset, the firmware will disable the MAC,
6755 * so we only need to stop the phy here.
6757 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6758 hdev->reset_type != HNAE3_FUNC_RESET) {
6759 hclge_mac_stop_phy(hdev);
6760 hclge_update_link_status(hdev);
6764 for (i = 0; i < handle->kinfo.num_tqps; i++)
6765 hclge_reset_tqp(handle, i);
6767 hclge_config_mac_tnl_int(hdev, false);
6770 hclge_cfg_mac_mode(hdev, false);
6772 hclge_mac_stop_phy(hdev);
6774 /* reset tqp stats */
6775 hclge_reset_tqp_stats(handle);
6776 hclge_update_link_status(hdev);
6779 int hclge_vport_start(struct hclge_vport *vport)
6781 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6782 vport->last_active_jiffies = jiffies;
6786 void hclge_vport_stop(struct hclge_vport *vport)
6788 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6791 static int hclge_client_start(struct hnae3_handle *handle)
6793 struct hclge_vport *vport = hclge_get_vport(handle);
6795 return hclge_vport_start(vport);
6798 static void hclge_client_stop(struct hnae3_handle *handle)
6800 struct hclge_vport *vport = hclge_get_vport(handle);
6802 hclge_vport_stop(vport);
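/* The firmware reports MAC/VLAN table results in two layers: cmdq_resp is
 * the command queue status, while resp_code carries the per-operation
 * result decoded below (e.g. overflow on add, or a "miss" when a remove
 * or lookup does not find the entry).
 */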
6805 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6806 u16 cmdq_resp, u8 resp_code,
6807 enum hclge_mac_vlan_tbl_opcode op)
6809 struct hclge_dev *hdev = vport->back;
6812 dev_err(&hdev->pdev->dev,
6813 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6818 if (op == HCLGE_MAC_VLAN_ADD) {
6819 if ((!resp_code) || (resp_code == 1)) {
6821 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6822 dev_err(&hdev->pdev->dev,
6823 "add mac addr failed for uc_overflow.\n");
6825 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6826 dev_err(&hdev->pdev->dev,
6827 "add mac addr failed for mc_overflow.\n");
6831 dev_err(&hdev->pdev->dev,
6832 "add mac addr failed for undefined, code=%u.\n",
6835 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6838 } else if (resp_code == 1) {
6839 dev_dbg(&hdev->pdev->dev,
6840 "remove mac addr failed for miss.\n");
6844 dev_err(&hdev->pdev->dev,
6845 "remove mac addr failed for undefined, code=%u.\n",
6848 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6851 } else if (resp_code == 1) {
6852 dev_dbg(&hdev->pdev->dev,
6853 "lookup mac addr failed for miss.\n");
6857 dev_err(&hdev->pdev->dev,
6858 "lookup mac addr failed for undefined, code=%u.\n",
6863 dev_err(&hdev->pdev->dev,
6864 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6869 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6871 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6873 unsigned int word_num;
6874 unsigned int bit_num;
6876 if (vfid > 255 || vfid < 0)
6879 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6880 word_num = vfid / 32;
6881 bit_num = vfid % 32;
6883 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6885 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6887 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6888 bit_num = vfid % 32;
6890 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6892 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6898 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6900 #define HCLGE_DESC_NUMBER 3
6901 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6904 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6905 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6906 if (desc[i].data[j])
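/* The MAC address is packed for the firmware in little-endian byte order:
 * bytes 0..3 form mac_addr_hi32 and bytes 4..5 form mac_addr_lo16, as the
 * shifts below show. Multicast entries additionally set the entry_type
 * and mc_mac_en bits.
 */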
6912 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6913 const u8 *addr, bool is_mc)
6915 const unsigned char *mac_addr = addr;
6916 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6917 (mac_addr[0]) | (mac_addr[1] << 8);
6918 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6920 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6922 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6923 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6926 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6927 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6930 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6931 struct hclge_mac_vlan_tbl_entry_cmd *req)
6933 struct hclge_dev *hdev = vport->back;
6934 struct hclge_desc desc;
6939 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6941 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6943 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6945 dev_err(&hdev->pdev->dev,
6946 "del mac addr failed for cmd_send, ret =%d.\n",
6950 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6951 retval = le16_to_cpu(desc.retval);
6953 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6954 HCLGE_MAC_VLAN_REMOVE);
6957 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6958 struct hclge_mac_vlan_tbl_entry_cmd *req,
6959 struct hclge_desc *desc,
6962 struct hclge_dev *hdev = vport->back;
6967 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6969 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6970 memcpy(desc[0].data,
6972 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6973 hclge_cmd_setup_basic_desc(&desc[1],
6974 HCLGE_OPC_MAC_VLAN_ADD,
6976 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6977 hclge_cmd_setup_basic_desc(&desc[2],
6978 HCLGE_OPC_MAC_VLAN_ADD,
6980 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6982 memcpy(desc[0].data,
6984 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6985 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6988 dev_err(&hdev->pdev->dev,
6989 "lookup mac addr failed for cmd_send, ret =%d.\n",
6993 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6994 retval = le16_to_cpu(desc[0].retval);
6996 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6997 HCLGE_MAC_VLAN_LKUP);
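/* Adding an entry mirrors the lookup above: a unicast entry (mc_desc ==
 * NULL) fits in a single descriptor, while a multicast entry carries its
 * vfid bitmap and therefore needs a chain of three descriptors linked
 * with HCLGE_CMD_FLAG_NEXT.
 */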
7000 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7001 struct hclge_mac_vlan_tbl_entry_cmd *req,
7002 struct hclge_desc *mc_desc)
7004 struct hclge_dev *hdev = vport->back;
7011 struct hclge_desc desc;
7013 hclge_cmd_setup_basic_desc(&desc,
7014 HCLGE_OPC_MAC_VLAN_ADD,
7016 memcpy(desc.data, req,
7017 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7018 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7019 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7020 retval = le16_to_cpu(desc.retval);
7022 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7024 HCLGE_MAC_VLAN_ADD);
7026 hclge_cmd_reuse_desc(&mc_desc[0], false);
7027 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7028 hclge_cmd_reuse_desc(&mc_desc[1], false);
7029 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7030 hclge_cmd_reuse_desc(&mc_desc[2], false);
7031 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7032 memcpy(mc_desc[0].data, req,
7033 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7034 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7035 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7036 retval = le16_to_cpu(mc_desc[0].retval);
7038 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7040 HCLGE_MAC_VLAN_ADD);
7044 dev_err(&hdev->pdev->dev,
7045 "add mac addr failed for cmd_send, ret =%d.\n",
7053 static int hclge_init_umv_space(struct hclge_dev *hdev)
7055 u16 allocated_size = 0;
7058 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7063 if (allocated_size < hdev->wanted_umv_size)
7064 dev_warn(&hdev->pdev->dev,
7065 "Alloc umv space failed, want %u, get %u\n",
7066 hdev->wanted_umv_size, allocated_size);
7068 mutex_init(&hdev->umv_mutex);
7069 hdev->max_umv_size = allocated_size;
7070 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7071 * preserve some unicast mac vlan table entries shared by the pf and its vfs.
7074 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7075 hdev->share_umv_size = hdev->priv_umv_size +
7076 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7081 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7085 if (hdev->max_umv_size > 0) {
7086 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7090 hdev->max_umv_size = 0;
7092 mutex_destroy(&hdev->umv_mutex);
7097 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7098 u16 *allocated_size, bool is_alloc)
7100 struct hclge_umv_spc_alc_cmd *req;
7101 struct hclge_desc desc;
7104 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7107 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7109 req->space_size = cpu_to_le32(space_size);
7111 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7113 dev_err(&hdev->pdev->dev,
7114 "%s umv space failed for cmd_send, ret =%d\n",
7115 is_alloc ? "allocate" : "free", ret);
7119 if (is_alloc && allocated_size)
7120 *allocated_size = le32_to_cpu(desc.data[1]);
7125 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7127 struct hclge_vport *vport;
7130 for (i = 0; i < hdev->num_alloc_vport; i++) {
7131 vport = &hdev->vport[i];
7132 vport->used_umv_num = 0;
7135 mutex_lock(&hdev->umv_mutex);
7136 hdev->share_umv_size = hdev->priv_umv_size +
7137 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7138 mutex_unlock(&hdev->umv_mutex);
7141 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7143 struct hclge_dev *hdev = vport->back;
7146 mutex_lock(&hdev->umv_mutex);
7147 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7148 hdev->share_umv_size == 0);
7149 mutex_unlock(&hdev->umv_mutex);
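/* UMV accounting: each function owns priv_umv_size private entries; once
 * a vport exceeds its private quota, further entries are charged against
 * the shared pool (share_umv_size), and freeing walks the same logic in
 * reverse, as implemented below.
 */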
7154 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7156 struct hclge_dev *hdev = vport->back;
7158 mutex_lock(&hdev->umv_mutex);
7160 if (vport->used_umv_num > hdev->priv_umv_size)
7161 hdev->share_umv_size++;
7163 if (vport->used_umv_num > 0)
7164 vport->used_umv_num--;
7166 if (vport->used_umv_num >= hdev->priv_umv_size &&
7167 hdev->share_umv_size > 0)
7168 hdev->share_umv_size--;
7169 vport->used_umv_num++;
7171 mutex_unlock(&hdev->umv_mutex);
7174 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7175 const unsigned char *addr)
7177 struct hclge_vport *vport = hclge_get_vport(handle);
7179 return hclge_add_uc_addr_common(vport, addr);
7182 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7183 const unsigned char *addr)
7185 struct hclge_dev *hdev = vport->back;
7186 struct hclge_mac_vlan_tbl_entry_cmd req;
7187 struct hclge_desc desc;
7188 u16 egress_port = 0;
7191 /* mac addr check */
7192 if (is_zero_ether_addr(addr) ||
7193 is_broadcast_ether_addr(addr) ||
7194 is_multicast_ether_addr(addr)) {
7195 dev_err(&hdev->pdev->dev,
7196 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7197 addr, is_zero_ether_addr(addr),
7198 is_broadcast_ether_addr(addr),
7199 is_multicast_ether_addr(addr));
7203 memset(&req, 0, sizeof(req));
7205 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7206 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7208 req.egress_port = cpu_to_le16(egress_port);
7210 hclge_prepare_mac_addr(&req, addr, false);
7212 /* Look up the mac address in the mac_vlan table, and add
7213 * it if the entry does not exist. Repeated unicast entries
7214 * are not allowed in the mac vlan table.
7216 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7217 if (ret == -ENOENT) {
7218 if (!hclge_is_umv_space_full(vport)) {
7219 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7221 hclge_update_umv_space(vport, false);
7225 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7226 hdev->priv_umv_size);
7231 /* check if we just hit the duplicate */
7233 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7234 vport->vport_id, addr);
7238 dev_err(&hdev->pdev->dev,
7239 "PF failed to add unicast entry(%pM) in the MAC table\n",
7245 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7246 const unsigned char *addr)
7248 struct hclge_vport *vport = hclge_get_vport(handle);
7250 return hclge_rm_uc_addr_common(vport, addr);
7253 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7254 const unsigned char *addr)
7256 struct hclge_dev *hdev = vport->back;
7257 struct hclge_mac_vlan_tbl_entry_cmd req;
7260 /* mac addr check */
7261 if (is_zero_ether_addr(addr) ||
7262 is_broadcast_ether_addr(addr) ||
7263 is_multicast_ether_addr(addr)) {
7264 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7269 memset(&req, 0, sizeof(req));
7270 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7271 hclge_prepare_mac_addr(&req, addr, false);
7272 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7274 hclge_update_umv_space(vport, true);
7279 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7280 const unsigned char *addr)
7282 struct hclge_vport *vport = hclge_get_vport(handle);
7284 return hclge_add_mc_addr_common(vport, addr);
7287 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7288 const unsigned char *addr)
7290 struct hclge_dev *hdev = vport->back;
7291 struct hclge_mac_vlan_tbl_entry_cmd req;
7292 struct hclge_desc desc[3];
7295 /* mac addr check */
7296 if (!is_multicast_ether_addr(addr)) {
7297 dev_err(&hdev->pdev->dev,
7298 "Add mc mac err! invalid mac:%pM.\n",
7302 memset(&req, 0, sizeof(req));
7303 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7304 hclge_prepare_mac_addr(&req, addr, true);
7305 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7307 /* This mac addr does not exist, add a new entry for it */
7308 memset(desc[0].data, 0, sizeof(desc[0].data));
7309 memset(desc[1].data, 0, sizeof(desc[0].data));
7310 memset(desc[2].data, 0, sizeof(desc[0].data));
7312 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7315 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7317 if (status == -ENOSPC)
7318 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7323 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7324 const unsigned char *addr)
7326 struct hclge_vport *vport = hclge_get_vport(handle);
7328 return hclge_rm_mc_addr_common(vport, addr);
7331 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7332 const unsigned char *addr)
7334 struct hclge_dev *hdev = vport->back;
7335 struct hclge_mac_vlan_tbl_entry_cmd req;
7336 enum hclge_cmd_status status;
7337 struct hclge_desc desc[3];
7339 /* mac addr check */
7340 if (!is_multicast_ether_addr(addr)) {
7341 dev_dbg(&hdev->pdev->dev,
7342 "Remove mc mac err! invalid mac:%pM.\n",
7347 memset(&req, 0, sizeof(req));
7348 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7349 hclge_prepare_mac_addr(&req, addr, true);
7350 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7352 /* This mac addr exists, remove this handle's VFID for it */
7353 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7357 if (hclge_is_all_function_id_zero(desc))
7358 /* All the vfids are zero, so delete this entry */
7359 status = hclge_remove_mac_vlan_tbl(vport, &req);
7361 /* Not all the vfids are zero, so just update the vfid bitmap */
7362 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7365 /* Maybe this mac address is in the mta table, but it cannot be
7366 * deleted here because an mta entry represents an address
7367 * range rather than a specific address. The delete action for
7368 * all entries will take effect in update_mta_status, called by
7369 * hns3_nic_set_rx_mode.
7377 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7378 enum HCLGE_MAC_ADDR_TYPE mac_type)
7380 struct hclge_vport_mac_addr_cfg *mac_cfg;
7381 struct list_head *list;
7383 if (!vport->vport_id)
7386 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7390 mac_cfg->hd_tbl_status = true;
7391 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7393 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7394 &vport->uc_mac_list : &vport->mc_mac_list;
7396 list_add_tail(&mac_cfg->node, list);
7399 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7401 enum HCLGE_MAC_ADDR_TYPE mac_type)
7403 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7404 struct list_head *list;
7405 bool uc_flag, mc_flag;
7407 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7408 &vport->uc_mac_list : &vport->mc_mac_list;
7410 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7411 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7413 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7414 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7415 if (uc_flag && mac_cfg->hd_tbl_status)
7416 hclge_rm_uc_addr_common(vport, mac_addr);
7418 if (mc_flag && mac_cfg->hd_tbl_status)
7419 hclge_rm_mc_addr_common(vport, mac_addr);
7421 list_del(&mac_cfg->node);
7428 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7429 enum HCLGE_MAC_ADDR_TYPE mac_type)
7431 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7432 struct list_head *list;
7434 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7435 &vport->uc_mac_list : &vport->mc_mac_list;
7437 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7438 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7439 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7441 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7442 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7444 mac_cfg->hd_tbl_status = false;
7446 list_del(&mac_cfg->node);
7452 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7454 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7455 struct hclge_vport *vport;
7458 mutex_lock(&hdev->vport_cfg_mutex);
7459 for (i = 0; i < hdev->num_alloc_vport; i++) {
7460 vport = &hdev->vport[i];
7461 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7462 list_del(&mac->node);
7466 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7467 list_del(&mac->node);
7471 mutex_unlock(&hdev->vport_cfg_mutex);
7474 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7475 u16 cmdq_resp, u8 resp_code)
7477 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7478 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7479 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7480 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7485 dev_err(&hdev->pdev->dev,
7486 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7491 switch (resp_code) {
7492 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7493 case HCLGE_ETHERTYPE_ALREADY_ADD:
7496 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7497 dev_err(&hdev->pdev->dev,
7498 "add mac ethertype failed for manager table overflow.\n");
7499 return_status = -EIO;
7501 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7502 dev_err(&hdev->pdev->dev,
7503 "add mac ethertype failed for key conflict.\n");
7504 return_status = -EIO;
7507 dev_err(&hdev->pdev->dev,
7508 "add mac ethertype failed for undefined, code=%u.\n",
7510 return_status = -EIO;
7513 return return_status;
7516 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7519 struct hclge_mac_vlan_tbl_entry_cmd req;
7520 struct hclge_dev *hdev = vport->back;
7521 struct hclge_desc desc;
7522 u16 egress_port = 0;
7525 if (is_zero_ether_addr(mac_addr))
7528 memset(&req, 0, sizeof(req));
7529 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7530 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7531 req.egress_port = cpu_to_le16(egress_port);
7532 hclge_prepare_mac_addr(&req, mac_addr, false);
7534 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7537 vf_idx += HCLGE_VF_VPORT_START_NUM;
7538 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7540 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7546 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7549 struct hclge_vport *vport = hclge_get_vport(handle);
7550 struct hclge_dev *hdev = vport->back;
7552 vport = hclge_get_vf_vport(hdev, vf);
7556 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7557 dev_info(&hdev->pdev->dev,
7558 "Specified MAC(=%pM) is same as before, no change committed!\n",
7563 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7564 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7569 ether_addr_copy(vport->vf_info.mac, mac_addr);
7570 dev_info(&hdev->pdev->dev,
7571 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7574 return hclge_inform_reset_assert_to_vf(vport);
7577 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7578 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7580 struct hclge_desc desc;
7585 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7586 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7588 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7590 dev_err(&hdev->pdev->dev,
7591 "add mac ethertype failed for cmd_send, ret =%d.\n",
7596 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7597 retval = le16_to_cpu(desc.retval);
7599 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7602 static int init_mgr_tbl(struct hclge_dev *hdev)
7607 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7608 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7610 dev_err(&hdev->pdev->dev,
7611 "add mac ethertype failed, ret =%d.\n",
7620 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7622 struct hclge_vport *vport = hclge_get_vport(handle);
7623 struct hclge_dev *hdev = vport->back;
7625 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7628 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7631 const unsigned char *new_addr = (const unsigned char *)p;
7632 struct hclge_vport *vport = hclge_get_vport(handle);
7633 struct hclge_dev *hdev = vport->back;
7636 /* mac addr check */
7637 if (is_zero_ether_addr(new_addr) ||
7638 is_broadcast_ether_addr(new_addr) ||
7639 is_multicast_ether_addr(new_addr)) {
7640 dev_err(&hdev->pdev->dev,
7641 "Change uc mac err! invalid mac:%pM.\n",
7646 if ((!is_first || is_kdump_kernel()) &&
7647 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7648 dev_warn(&hdev->pdev->dev,
7649 "remove old uc mac address fail.\n");
7651 ret = hclge_add_uc_addr(handle, new_addr);
7653 dev_err(&hdev->pdev->dev,
7654 "add uc mac address fail, ret =%d.\n",
7658 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7659 dev_err(&hdev->pdev->dev,
7660 "restore uc mac address fail.\n");
7665 ret = hclge_pause_addr_cfg(hdev, new_addr);
7667 dev_err(&hdev->pdev->dev,
7668 "configure mac pause address fail, ret =%d.\n",
7673 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7678 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7681 struct hclge_vport *vport = hclge_get_vport(handle);
7682 struct hclge_dev *hdev = vport->back;
7684 if (!hdev->hw.mac.phydev)
7687 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7690 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7691 u8 fe_type, bool filter_en, u8 vf_id)
7693 struct hclge_vlan_filter_ctrl_cmd *req;
7694 struct hclge_desc desc;
7697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7699 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7700 req->vlan_type = vlan_type;
7701 req->vlan_fe = filter_en ? fe_type : 0;
7704 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7706 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7712 #define HCLGE_FILTER_TYPE_VF 0
7713 #define HCLGE_FILTER_TYPE_PORT 1
7714 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7715 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7716 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7717 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7718 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7719 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7720 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7721 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7722 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7724 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7726 struct hclge_vport *vport = hclge_get_vport(handle);
7727 struct hclge_dev *hdev = vport->back;
7729 if (hdev->pdev->revision >= 0x21) {
7730 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7731 HCLGE_FILTER_FE_EGRESS, enable, 0);
7732 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7733 HCLGE_FILTER_FE_INGRESS, enable, 0);
7735 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7736 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7740 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7742 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
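/* The VF vlan filter command addresses functions through a bitmap split
 * across two descriptors: byte index vfid / 8, bit vfid % 8, with the
 * first HCLGE_MAX_VF_BYTES bytes carried in desc[0] and the overflow in
 * desc[1].
 */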
7745 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7746 bool is_kill, u16 vlan,
7749 struct hclge_vport *vport = &hdev->vport[vfid];
7750 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7751 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7752 struct hclge_desc desc[2];
7757 /* If the vf vlan table is full, firmware will close the vf vlan filter;
7758 * it is then impossible and unnecessary to add a new vlan id there.
7759 * If spoof check is enabled and the vf vlan table is full, a new vlan
7760 * must not be added, because tx packets with that vlan id are dropped.
7762 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7763 if (vport->vf_info.spoofchk && vlan) {
7764 dev_err(&hdev->pdev->dev,
7765 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7771 hclge_cmd_setup_basic_desc(&desc[0],
7772 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7773 hclge_cmd_setup_basic_desc(&desc[1],
7774 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7776 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7778 vf_byte_off = vfid / 8;
7779 vf_byte_val = 1 << (vfid % 8);
7781 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7782 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7784 req0->vlan_id = cpu_to_le16(vlan);
7785 req0->vlan_cfg = is_kill;
7787 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7788 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7790 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7792 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7794 dev_err(&hdev->pdev->dev,
7795 "Send vf vlan command fail, ret =%d.\n",
7801 #define HCLGE_VF_VLAN_NO_ENTRY 2
7802 if (!req0->resp_code || req0->resp_code == 1)
7805 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7806 set_bit(vfid, hdev->vf_vlan_full);
7807 dev_warn(&hdev->pdev->dev,
7808 "vf vlan table is full, vf vlan filter is disabled\n");
7812 dev_err(&hdev->pdev->dev,
7813 "Add vf vlan filter fail, ret =%u.\n",
7816 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7817 if (!req0->resp_code)
7820 /* vf vlan filter is disabled when the vf vlan table is full,
7821 * so new vlan ids will not be added into the vf vlan table.
7822 * Just return 0 without a warning, to avoid massive verbose
7823 * logs on unload.
7825 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7828 dev_err(&hdev->pdev->dev,
7829 "Kill vf vlan filter fail, ret =%u.\n",
7836 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7837 u16 vlan_id, bool is_kill)
7839 struct hclge_vlan_filter_pf_cfg_cmd *req;
7840 struct hclge_desc desc;
7841 u8 vlan_offset_byte_val;
7842 u8 vlan_offset_byte;
7846 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7848 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7849 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7850 HCLGE_VLAN_BYTE_SIZE;
7851 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7853 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7854 req->vlan_offset = vlan_offset_160;
7855 req->vlan_cfg = is_kill;
7856 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7858 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7860 dev_err(&hdev->pdev->dev,
7861 "port vlan command, send fail, ret =%d.\n", ret);
7865 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7866 u16 vport_id, u16 vlan_id,
7869 u16 vport_idx, vport_num = 0;
7872 if (is_kill && !vlan_id)
7875 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7878 dev_err(&hdev->pdev->dev,
7879 "Set %u vport vlan filter config fail, ret =%d.\n",
7884 /* vlan 0 may be added twice when 8021q module is enabled */
7885 if (!is_kill && !vlan_id &&
7886 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7889 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7890 dev_err(&hdev->pdev->dev,
7891 "Add port vlan failed, vport %u is already in vlan %u\n",
7897 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7898 dev_err(&hdev->pdev->dev,
7899 "Delete port vlan failed, vport %u is not in vlan %u\n",
7904 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7907 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7908 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7914 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7916 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7917 struct hclge_vport_vtag_tx_cfg_cmd *req;
7918 struct hclge_dev *hdev = vport->back;
7919 struct hclge_desc desc;
7923 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7925 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7926 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7927 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7928 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7929 vcfg->accept_tag1 ? 1 : 0);
7930 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7931 vcfg->accept_untag1 ? 1 : 0);
7932 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7933 vcfg->accept_tag2 ? 1 : 0);
7934 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7935 vcfg->accept_untag2 ? 1 : 0);
7936 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7937 vcfg->insert_tag1_en ? 1 : 0);
7938 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7939 vcfg->insert_tag2_en ? 1 : 0);
7940 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7942 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7943 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7944 HCLGE_VF_NUM_PER_BYTE;
7945 req->vf_bitmap[bmap_index] =
7946 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7948 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7950 dev_err(&hdev->pdev->dev,
7951 "Send port txvlan cfg command fail, ret =%d\n",
7957 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7959 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7960 struct hclge_vport_vtag_rx_cfg_cmd *req;
7961 struct hclge_dev *hdev = vport->back;
7962 struct hclge_desc desc;
7966 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7968 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7969 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7970 vcfg->strip_tag1_en ? 1 : 0);
7971 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7972 vcfg->strip_tag2_en ? 1 : 0);
7973 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7974 vcfg->vlan1_vlan_prionly ? 1 : 0);
7975 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7976 vcfg->vlan2_vlan_prionly ? 1 : 0);
7978 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7979 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7980 HCLGE_VF_NUM_PER_BYTE;
7981 req->vf_bitmap[bmap_index] =
7982 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7984 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7986 dev_err(&hdev->pdev->dev,
7987 "Send port rxvlan cfg command fail, ret =%d\n",
7993 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7994 u16 port_base_vlan_state,
7999 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8000 vport->txvlan_cfg.accept_tag1 = true;
8001 vport->txvlan_cfg.insert_tag1_en = false;
8002 vport->txvlan_cfg.default_tag1 = 0;
8004 vport->txvlan_cfg.accept_tag1 = false;
8005 vport->txvlan_cfg.insert_tag1_en = true;
8006 vport->txvlan_cfg.default_tag1 = vlan_tag;
8009 vport->txvlan_cfg.accept_untag1 = true;
8011 /* accept_tag2 and accept_untag2 are not supported on
8012 * pdev revision 0x20; newer revisions support them, but
8013 * these two fields cannot be configured by the user.
8015 vport->txvlan_cfg.accept_tag2 = true;
8016 vport->txvlan_cfg.accept_untag2 = true;
8017 vport->txvlan_cfg.insert_tag2_en = false;
8018 vport->txvlan_cfg.default_tag2 = 0;
8020 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8021 vport->rxvlan_cfg.strip_tag1_en = false;
8022 vport->rxvlan_cfg.strip_tag2_en =
8023 vport->rxvlan_cfg.rx_vlan_offload_en;
8025 vport->rxvlan_cfg.strip_tag1_en =
8026 vport->rxvlan_cfg.rx_vlan_offload_en;
8027 vport->rxvlan_cfg.strip_tag2_en = true;
8029 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8030 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8032 ret = hclge_set_vlan_tx_offload_cfg(vport);
8036 return hclge_set_vlan_rx_offload_cfg(vport);
8039 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8041 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8042 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8043 struct hclge_desc desc;
8046 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8047 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8048 rx_req->ot_fst_vlan_type =
8049 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8050 rx_req->ot_sec_vlan_type =
8051 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8052 rx_req->in_fst_vlan_type =
8053 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8054 rx_req->in_sec_vlan_type =
8055 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8057 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8059 dev_err(&hdev->pdev->dev,
8060 "Send rxvlan protocol type command fail, ret =%d\n",
8065 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8067 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8068 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8069 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8071 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8073 dev_err(&hdev->pdev->dev,
8074 "Send txvlan protocol type command fail, ret =%d\n",
8080 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8082 #define HCLGE_DEF_VLAN_TYPE 0x8100
8084 struct hnae3_handle *handle = &hdev->vport[0].nic;
8085 struct hclge_vport *vport;
8089 if (hdev->pdev->revision >= 0x21) {
8090 /* for revision 0x21, vf vlan filter is per function */
8091 for (i = 0; i < hdev->num_alloc_vport; i++) {
8092 vport = &hdev->vport[i];
8093 ret = hclge_set_vlan_filter_ctrl(hdev,
8094 HCLGE_FILTER_TYPE_VF,
8095 HCLGE_FILTER_FE_EGRESS,
8102 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8103 HCLGE_FILTER_FE_INGRESS, true,
8108 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8109 HCLGE_FILTER_FE_EGRESS_V1_B,
8115 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8117 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8118 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8119 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8120 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8121 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8122 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8124 ret = hclge_set_vlan_protocol_type(hdev);
8128 for (i = 0; i < hdev->num_alloc_vport; i++) {
8131 vport = &hdev->vport[i];
8132 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8134 ret = hclge_vlan_offload_cfg(vport,
8135 vport->port_base_vlan_cfg.state,
8141 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8144 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8147 struct hclge_vport_vlan_cfg *vlan;
8149 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8153 vlan->hd_tbl_status = writen_to_tbl;
8154 vlan->vlan_id = vlan_id;
8156 list_add_tail(&vlan->node, &vport->vlan_list);
8159 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8161 struct hclge_vport_vlan_cfg *vlan, *tmp;
8162 struct hclge_dev *hdev = vport->back;
8165 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8166 if (!vlan->hd_tbl_status) {
8167 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8169 vlan->vlan_id, false);
8171 dev_err(&hdev->pdev->dev,
8172 "restore vport vlan list failed, ret=%d\n",
8177 vlan->hd_tbl_status = true;
8183 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8186 struct hclge_vport_vlan_cfg *vlan, *tmp;
8187 struct hclge_dev *hdev = vport->back;
8189 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8190 if (vlan->vlan_id == vlan_id) {
8191 if (is_write_tbl && vlan->hd_tbl_status)
8192 hclge_set_vlan_filter_hw(hdev,
8198 list_del(&vlan->node);
8205 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8207 struct hclge_vport_vlan_cfg *vlan, *tmp;
8208 struct hclge_dev *hdev = vport->back;
8210 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8211 if (vlan->hd_tbl_status)
8212 hclge_set_vlan_filter_hw(hdev,
8218 vlan->hd_tbl_status = false;
8220 list_del(&vlan->node);
8226 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8228 struct hclge_vport_vlan_cfg *vlan, *tmp;
8229 struct hclge_vport *vport;
8232 mutex_lock(&hdev->vport_cfg_mutex);
8233 for (i = 0; i < hdev->num_alloc_vport; i++) {
8234 vport = &hdev->vport[i];
8235 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8236 list_del(&vlan->node);
8240 mutex_unlock(&hdev->vport_cfg_mutex);
8243 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8245 struct hclge_vport *vport = hclge_get_vport(handle);
8246 struct hclge_vport_vlan_cfg *vlan, *tmp;
8247 struct hclge_dev *hdev = vport->back;
8252 mutex_lock(&hdev->vport_cfg_mutex);
8253 for (i = 0; i < hdev->num_alloc_vport; i++) {
8254 vport = &hdev->vport[i];
8255 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8256 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8257 state = vport->port_base_vlan_cfg.state;
8259 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8260 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8261 vport->vport_id, vlan_id,
8266 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8269 if (!vlan->hd_tbl_status)
8271 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8273 vlan->vlan_id, false);
8279 mutex_unlock(&hdev->vport_cfg_mutex);
8282 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8284 struct hclge_vport *vport = hclge_get_vport(handle);
8286 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8287 vport->rxvlan_cfg.strip_tag1_en = false;
8288 vport->rxvlan_cfg.strip_tag2_en = enable;
8290 vport->rxvlan_cfg.strip_tag1_en = enable;
8291 vport->rxvlan_cfg.strip_tag2_en = true;
8293 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8294 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8295 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8297 return hclge_set_vlan_rx_offload_cfg(vport);
8300 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8301 u16 port_base_vlan_state,
8302 struct hclge_vlan_info *new_info,
8303 struct hclge_vlan_info *old_info)
8305 struct hclge_dev *hdev = vport->back;
8308 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8309 hclge_rm_vport_all_vlan_table(vport, false);
8310 return hclge_set_vlan_filter_hw(hdev,
8311 htons(new_info->vlan_proto),
8317 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8318 vport->vport_id, old_info->vlan_tag,
8323 return hclge_add_vport_all_vlan_table(vport);
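/* Note the ordering below for the MODIFY case: the new tag is written to
 * the hardware filter before the old one is removed, presumably so the
 * port is never left without a matching filter entry in between.
 */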
8326 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8327 struct hclge_vlan_info *vlan_info)
8329 struct hnae3_handle *nic = &vport->nic;
8330 struct hclge_vlan_info *old_vlan_info;
8331 struct hclge_dev *hdev = vport->back;
8334 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8336 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8340 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8341 /* add new VLAN tag */
8342 ret = hclge_set_vlan_filter_hw(hdev,
8343 htons(vlan_info->vlan_proto),
8345 vlan_info->vlan_tag,
8350 /* remove old VLAN tag */
8351 ret = hclge_set_vlan_filter_hw(hdev,
8352 htons(old_vlan_info->vlan_proto),
8354 old_vlan_info->vlan_tag,
8362 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8367 /* update state only when disabling/enabling port based VLAN */
8368 vport->port_base_vlan_cfg.state = state;
8369 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8370 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8372 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8375 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8376 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8377 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
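/* State transitions resolved below: with port based vlan disabled, vlan 0
 * means NOCHANGE and a non-zero vlan means ENABLE; with it enabled, vlan 0
 * means DISABLE, the same vlan means NOCHANGE, and any other vlan means
 * MODIFY.
 */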
8382 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8383 enum hnae3_port_base_vlan_state state,
8386 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8388 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8390 return HNAE3_PORT_BASE_VLAN_ENABLE;
8393 return HNAE3_PORT_BASE_VLAN_DISABLE;
8394 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8395 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8397 return HNAE3_PORT_BASE_VLAN_MODIFY;
8401 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8402 u16 vlan, u8 qos, __be16 proto)
8404 struct hclge_vport *vport = hclge_get_vport(handle);
8405 struct hclge_dev *hdev = vport->back;
8406 struct hclge_vlan_info vlan_info;
8410 if (hdev->pdev->revision == 0x20)
8413 /* qos is a 3-bit value, so it cannot be bigger than 7 */
8414 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8416 if (proto != htons(ETH_P_8021Q))
8417 return -EPROTONOSUPPORT;
8419 vport = &hdev->vport[vfid];
8420 state = hclge_get_port_base_vlan_state(vport,
8421 vport->port_base_vlan_cfg.state,
8423 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8426 vlan_info.vlan_tag = vlan;
8427 vlan_info.qos = qos;
8428 vlan_info.vlan_proto = ntohs(proto);
8430 /* update port based VLAN for PF */
8432 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8433 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8434 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8439 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8440 return hclge_update_port_base_vlan_cfg(vport, state,
8443 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8451 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8452 u16 vlan_id, bool is_kill)
8454 struct hclge_vport *vport = hclge_get_vport(handle);
8455 struct hclge_dev *hdev = vport->back;
8456 bool writen_to_tbl = false;
8459 /* When the device is resetting, firmware is unable to handle the
8460 * mailbox. Just record the vlan id, and remove it after the reset finishes.
8463 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8464 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8468 /* When port based vlan is enabled, we use the port based vlan as the
8469 * vlan filter entry. In this case, we don't update the vlan filter table
8470 * when the user adds a new vlan or removes an existing vlan; we just
8471 * update the vport vlan list. The vlan ids in the vlan list will not be
8472 * written into the vlan filter table until port based vlan is disabled.
8474 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8475 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8477 writen_to_tbl = true;
8482 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8484 hclge_add_vport_vlan_table(vport, vlan_id,
8486 } else if (is_kill) {
8487 /* when removing the hw vlan filter failed, record the vlan id,
8488 * and try to remove it from hw later, to be consistent with the stack
8491 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8496 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8498 #define HCLGE_MAX_SYNC_COUNT 60
8500 int i, ret, sync_cnt = 0;
8503 /* start from vport 1, since the PF is always alive */
8504 for (i = 0; i < hdev->num_alloc_vport; i++) {
8505 struct hclge_vport *vport = &hdev->vport[i];
8507 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8509 while (vlan_id != VLAN_N_VID) {
8510 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8511 vport->vport_id, vlan_id,
8513 if (ret && ret != -EINVAL)
8516 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8517 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8520 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8523 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8529 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8531 struct hclge_config_max_frm_size_cmd *req;
8532 struct hclge_desc desc;
8534 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8536 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8537 req->max_frm_size = cpu_to_le16(new_mps);
8538 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8540 return hclge_cmd_send(&hdev->hw, &desc, 1);
8543 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8545 struct hclge_vport *vport = hclge_get_vport(handle);
8547 return hclge_set_vport_mtu(vport, new_mtu);
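/* The frame budget below is the MTU plus the Ethernet header, FCS and two
 * vlan tags (the hardware supports double-tagged frames). A VF's mps must
 * fit within the PF's, and the PF's mps must cover every VF's.
 */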
8550 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8552 struct hclge_dev *hdev = vport->back;
8553 int i, max_frm_size, ret;
8555 /* HW supports 2 layers of vlan */
8556 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8557 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8558 max_frm_size > HCLGE_MAC_MAX_FRAME)
8561 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8562 mutex_lock(&hdev->vport_lock);
8563 /* VF's mps must fit within hdev->mps */
8564 if (vport->vport_id && max_frm_size > hdev->mps) {
8565 mutex_unlock(&hdev->vport_lock);
8567 } else if (vport->vport_id) {
8568 vport->mps = max_frm_size;
8569 mutex_unlock(&hdev->vport_lock);
8573 /* PF's mps must be greater than any VF's mps */
8574 for (i = 1; i < hdev->num_alloc_vport; i++)
8575 if (max_frm_size < hdev->vport[i].mps) {
8576 mutex_unlock(&hdev->vport_lock);
8580 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8582 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8584 dev_err(&hdev->pdev->dev,
8585 "Change mtu fail, ret =%d\n", ret);
8589 hdev->mps = max_frm_size;
8590 vport->mps = max_frm_size;
8592 ret = hclge_buffer_alloc(hdev);
8594 dev_err(&hdev->pdev->dev,
8595 "Allocate buffer fail, ret =%d\n", ret);
8598 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8599 mutex_unlock(&hdev->vport_lock);
8603 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8606 struct hclge_reset_tqp_queue_cmd *req;
8607 struct hclge_desc desc;
8610 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8612 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8613 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8615 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8617 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8619 dev_err(&hdev->pdev->dev,
8620 "Send tqp reset cmd error, status =%d\n", ret);
8627 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8629 struct hclge_reset_tqp_queue_cmd *req;
8630 struct hclge_desc desc;
8633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8635 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8636 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8638 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8640 dev_err(&hdev->pdev->dev,
8641 "Get reset status error, status =%d\n", ret);
8645 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8648 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8650 struct hnae3_queue *queue;
8651 struct hclge_tqp *tqp;
8653 queue = handle->kinfo.tqp[queue_id];
8654 tqp = container_of(queue, struct hclge_tqp, q);
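/* TQP reset sequence: disable the queue, assert the reset through the
 * command queue, poll the ready bit up to HCLGE_TQP_RESET_TRY_TIMES with
 * a ~1ms sleep per try, then deassert the soft reset.
 */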
8659 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8661 struct hclge_vport *vport = hclge_get_vport(handle);
8662 struct hclge_dev *hdev = vport->back;
8663 int reset_try_times = 0;
8668 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8670 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8672 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8676 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8678 dev_err(&hdev->pdev->dev,
8679 "Send reset tqp cmd fail, ret = %d\n", ret);
8683 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8684 reset_status = hclge_get_reset_status(hdev, queue_gid);
8688 /* Wait for tqp hw reset */
8689 usleep_range(1000, 1200);
8692 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8693 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8697 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8699 dev_err(&hdev->pdev->dev,
8700 "Deassert the soft reset fail, ret = %d\n", ret);
8705 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8707 struct hclge_dev *hdev = vport->back;
8708 int reset_try_times = 0;
8713 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8715 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8717 dev_warn(&hdev->pdev->dev,
8718 "Send reset tqp cmd fail, ret = %d\n", ret);
8722 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8723 reset_status = hclge_get_reset_status(hdev, queue_gid);
8727 /* Wait for tqp hw reset */
8728 usleep_range(1000, 1200);
8731 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8732 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8736 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8738 dev_warn(&hdev->pdev->dev,
8739 "Deassert the soft reset fail, ret = %d\n", ret);
8742 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8744 struct hclge_vport *vport = hclge_get_vport(handle);
8745 struct hclge_dev *hdev = vport->back;
8747 return hdev->fw_version;
8750 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8752 struct phy_device *phydev = hdev->hw.mac.phydev;
8757 phy_set_asym_pause(phydev, rx_en, tx_en);
8760 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8764 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8767 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8769 dev_err(&hdev->pdev->dev,
8770 "configure pauseparam error, ret = %d.\n", ret);
8775 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8777 struct phy_device *phydev = hdev->hw.mac.phydev;
8778 u16 remote_advertising = 0;
8779 u16 local_advertising;
8780 u32 rx_pause, tx_pause;
8783 if (!phydev->link || !phydev->autoneg)
8786 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8789 remote_advertising = LPA_PAUSE_CAP;
8791 if (phydev->asym_pause)
8792 remote_advertising |= LPA_PAUSE_ASYM;
8794 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8795 remote_advertising);
8796 tx_pause = flowctl & FLOW_CTRL_TX;
8797 rx_pause = flowctl & FLOW_CTRL_RX;
8799 if (phydev->duplex == HCLGE_MAC_HALF) {
8804 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8807 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8808 u32 *rx_en, u32 *tx_en)
8810 struct hclge_vport *vport = hclge_get_vport(handle);
8811 struct hclge_dev *hdev = vport->back;
8812 struct phy_device *phydev = hdev->hw.mac.phydev;
8814 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8816 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8822 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8825 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8828 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8837 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8838 u32 rx_en, u32 tx_en)
8841 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8842 else if (rx_en && !tx_en)
8843 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8844 else if (!rx_en && tx_en)
8845 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8847 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8849 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8852 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8853 u32 rx_en, u32 tx_en)
8855 struct hclge_vport *vport = hclge_get_vport(handle);
8856 struct hclge_dev *hdev = vport->back;
8857 struct phy_device *phydev = hdev->hw.mac.phydev;
8861 fc_autoneg = hclge_get_autoneg(handle);
8862 if (auto_neg != fc_autoneg) {
8863 dev_info(&hdev->pdev->dev,
8864 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8869 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8870 dev_info(&hdev->pdev->dev,
8871 "Priority flow control enabled. Cannot set link flow control.\n");
8875 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8877 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8880 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8883 return phy_start_aneg(phydev);
8888 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8889 u8 *auto_neg, u32 *speed, u8 *duplex)
8891 struct hclge_vport *vport = hclge_get_vport(handle);
8892 struct hclge_dev *hdev = vport->back;
8895 *speed = hdev->hw.mac.speed;
8897 *duplex = hdev->hw.mac.duplex;
8899 *auto_neg = hdev->hw.mac.autoneg;
8902 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8905 struct hclge_vport *vport = hclge_get_vport(handle);
8906 struct hclge_dev *hdev = vport->back;
8909 *media_type = hdev->hw.mac.media_type;
8912 *module_type = hdev->hw.mac.module_type;
8915 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8916 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8918 struct hclge_vport *vport = hclge_get_vport(handle);
8919 struct hclge_dev *hdev = vport->back;
8920 struct phy_device *phydev = hdev->hw.mac.phydev;
8921 int mdix_ctrl, mdix, is_resolved;
8922 unsigned int retval;
8925 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8926 *tp_mdix = ETH_TP_MDI_INVALID;
8930 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8932 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8933 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8934 HCLGE_PHY_MDIX_CTRL_S);
8936 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8937 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8938 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8940 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8942 switch (mdix_ctrl) {
8944 *tp_mdix_ctrl = ETH_TP_MDI;
8947 *tp_mdix_ctrl = ETH_TP_MDI_X;
8950 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8953 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8958 *tp_mdix = ETH_TP_MDI_INVALID;
8960 *tp_mdix = ETH_TP_MDI_X;
8962 *tp_mdix = ETH_TP_MDI;
8965 static void hclge_info_show(struct hclge_dev *hdev)
8967 struct device *dev = &hdev->pdev->dev;
8969 dev_info(dev, "PF info begin:\n");
8971 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
8972 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
8973 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
8974 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
8975 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
8976 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
8977 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
8978 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
8979 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
8980 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
8981 dev_info(dev, "This is %s PF\n",
8982 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8983 dev_info(dev, "DCB %s\n",
8984 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8985 dev_info(dev, "MQPRIO %s\n",
8986 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8988 dev_info(dev, "PF info end.\n");
8991 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8992 struct hclge_vport *vport)
8994 struct hnae3_client *client = vport->nic.client;
8995 struct hclge_dev *hdev = ae_dev->priv;
8996 int rst_cnt = hdev->rst_stats.reset_cnt;
8999 ret = client->ops->init_instance(&vport->nic);
9003 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9004 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9005 rst_cnt != hdev->rst_stats.reset_cnt) {
9010 /* Enable nic hw error interrupts */
9011 ret = hclge_config_nic_hw_error(hdev, true);
9013 dev_err(&ae_dev->pdev->dev,
9014 "fail(%d) to enable hw error interrupts\n", ret);
9018 hnae3_set_client_init_flag(client, ae_dev, 1);
9020 if (netif_msg_drv(&hdev->vport->nic))
9021 hclge_info_show(hdev);
9026 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9027 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9028 msleep(HCLGE_WAIT_RESET_DONE);
9030 client->ops->uninit_instance(&vport->nic, 0);
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

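/* Tear down client instances. The RoCE client is always uninitialized
 * before the NIC client, since RoCE is layered on top of the NIC
 * instance; any in-flight reset is waited out before calling into the
 * client's uninit hook.
 */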
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

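/* Enable the PCI device, set a 64-bit DMA mask (falling back to
 * 32-bit), map BAR2 as the command/doorbell register space, and read
 * how many VFs the function can support.
 */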
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

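/* Quiesce the function before an FLR: request a function-level reset
 * and poll until the reset task has brought the device down, or a
 * timeout of HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS elapses.
 */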
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

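/* Full PF bring-up: PCI and command queue setup first, then interrupt
 * vectors, queue/vport allocation, and MAC/VLAN/TM/RSS/flow-director
 * configuration, finishing with the background tasks and the misc
 * vector. Each step unwinds through the error labels at the bottom on
 * failure.
 */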
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2 layers of VLAN tags */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on this PF would affect the pending initialization
	 * of other PFs.
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

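/* Spoof check is enforced in two places: the MAC anti-spoofing switch
 * parameter and the NIC ingress VLAN filter; both are programmed here
 * so the setting stays consistent.
 */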
static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}

static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}

static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->pdev->revision == 0x20)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;
	bool en_bc_pmc;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	/* Disable promisc mode for VF if it is not trusted any more. */
	if (!enable && vport->vf_info.promisc_enable) {
		en_bc_pmc = hdev->pdev->revision != 0x20;
		ret = hclge_set_vport_promisc_mode(vport, false, false,
						   en_bc_pmc);
		if (ret)
			return ret;
		vport->vf_info.promisc_enable = 0;
		hclge_inform_vf_promisc_info(vport);
	}

	vport->vf_info.trusted = new_trusted;

	return 0;
}

static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate; after reset the firmware has already
		 * set it to max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

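/* Rebuild the hardware state after a reset: the same configuration
 * steps as the initial bring-up, minus the allocations that survive
 * the reset, plus restoring the per-VF spoof-check and rate settings.
 */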
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Log and clear the hw errors that already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

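/* Change the number of enabled queue pairs. The TC-to-vport mapping is
 * updated first, then the RSS TC mode is reprogrammed for the new
 * rss_size; unless the user configured the RSS indirection table
 * explicitly, the table is re-spread over the new queue range.
 */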
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

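/* Read the 32-bit register dump. Each returned descriptor carries
 * HCLGE_32_BIT_REG_RTN_DATANUM words of data, except the first one,
 * which loses HCLGE_32_BIT_DESC_NODATA_LEN words to the command
 * header, hence the two cases when walking the descriptors below.
 */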
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30

int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	/* prepare 4 commands to query DFX BD number */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);
}

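/* Query how many buffer descriptors each DFX register block needs.
 * The per-block counts are scattered across the four query descriptors
 * at the offsets given in hclge_dfx_bd_offset_list.
 */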
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
#define HCLGE_DFX_REG_BD_NUM	4

	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}

static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, data_len, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}

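/* Copy the directly readable PF registers (command queue, common, ring
 * and TQP interrupt blocks) into the dump buffer, padding each block
 * with SEPARATOR_VALUE words out to a whole register line, and return
 * the total number of words written.
 */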
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetch per-PF register values from the PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

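/* Fill the register dump for ethtool. The blocks must be emitted in
 * the same order and with the same separator padding that
 * hclge_get_regs_len() accounts for, otherwise the dump would overrun
 * the buffer ethtool allocated.
 */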
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

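/* PF implementation of the hnae3 ae_ops interface; the hns3 core and
 * client drivers reach the PF hardware through this table.
 */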
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);