// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"

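/* HCLGE_STATS_READ() reads a u64 statistic from a stats structure at the
 * given byte offset, and HCLGE_MAC_STATS_FIELD_OFF() turns a field name of
 * struct hclge_mac_stats into such an offset. Together they let the
 * g_mac_stats_string table below describe each stat as a (name, offset) pair.
 */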
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

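/* Reset retry/synchronization parameters. The sync times are delays in
 * milliseconds and HCLGE_PF_RESET_SYNC_CNT bounds the number of retries
 * (units inferred from how these values are used in the reset path).
 */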
#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

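/* The DFX offsets above index the buffer-descriptor-number query result and
 * correspond one to one with hclge_dfx_bd_offset_list and
 * hclge_dfx_reg_opcode_list defined further down.
 */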
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

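/* Entry for the MAC manager table: matches LLDP frames (ethertype
 * ETH_P_LLDP, destination MAC 01:80:c2:00:00:0e, the LLDP nearest-bridge
 * multicast address) on port 0.
 */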
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

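/* Default RSS hash key. This appears to be the well-known default Toeplitz
 * key from Microsoft's RSS specification, also used by several other NIC
 * drivers.
 */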
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

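/* MAC statistics can be read in two ways: old firmware only supports a
 * fixed read of HCLGE_MAC_CMD_NUM descriptors ("defective"), while newer
 * firmware reports the number of stats registers so the full set can be
 * read in one variable-length command ("complete").
 */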
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	/* one descriptor for the first three registers, then four registers
	 * per additional descriptor, rounding up
	 */
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

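/* Choose the stats acquisition method by probing the register-number query:
 * success means the new "complete" method is available, while -EOPNOTSUPP
 * indicates old firmware and triggers the "defective" fallback.
 */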
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has a TX queue and an RX queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

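/* Copy each named statistic out of a stats structure using the byte offsets
 * recorded in the string table, returning the advanced buffer pointer so
 * callers can append further stats.
 */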
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return (u8 *)buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	/* serialize concurrent updaters; skip if an update is in flight */
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

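/* ethtool stringset handling: ETH_SS_TEST enumerates the supported loopback
 * self-tests (and updates handle->flags accordingly), while ETH_SS_STATS
 * counts MAC stats plus per-TQP stats.
 */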
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check whether pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

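/* The PF resource query returns the TQP count, the packet buffer sizes (in
 * hardware units of (1 << HCLGE_BUF_UNIT_S) bytes) and the MSI-X layout;
 * NIC vectors are laid out before RoCE vectors.
 */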
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

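/* Firmware speed encoding used below and again in
 * hclge_cfg_mac_speed_dup_hw():
 * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
 */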
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

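/* Only revision 0x21 and later devices report FEC ability; on older
 * hardware the FEC link modes are left cleared.
 */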
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is given to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* use the minimal number of queue pairs: one per vport */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

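/* Note: the max MSS below intentionally reuses the HCLGE_TSO_MSS_MIN_M
 * mask and shift; min and max appear to be packed with the same field
 * layout in their respective 16-bit words of the command.
 */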
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear private buffers starting from the highest-numbered TC */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear private buffers starting from the highest-numbered TC */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

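/* As a first strategy, try to give every enabled TC an equally sized
 * private buffer and no shared buffer at all; this only succeeds when the
 * per-TC share stays above the minimum required private buffer size.
 */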
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

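/* MSI/MSI-X setup: ask the PCI core for anywhere between 1 and
 * hdev->num_msi vectors and accept whatever it grants, shrinking the
 * driver's bookkeeping to match. vector_status[] tracks which vport owns
 * each vector (HCLGE_INVALID_VPORT means free) and vector_irq[] caches
 * the Linux IRQ number for each slot.
 */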
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

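/* The firmware encodes link speed as a small enum in the SPEED field of
 * the config-speed/duplex command rather than as a raw bit rate; the
 * switch below maps HCLGE_MAC_SPEED_* to those codes (1G=0, 10G=1,
 * 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7).
 */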
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}

static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
	struct hclge_config_fec_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);

	req = (struct hclge_config_fec_cmd *)desc.data;
	if (fec_mode & BIT(HNAE3_FEC_AUTO))
		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
	if (fec_mode & BIT(HNAE3_FEC_RS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
	if (fec_mode & BIT(HNAE3_FEC_BASER))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}

static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}

static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
			  u8 *fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;

	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	if (hdev->hw.mac.support_autoneg) {
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Config mac autoneg fail ret=%d\n", ret);
			return ret;
		}
	}

	mac->link = 0;

	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Fec mode init fail, ret = %d\n", ret);
			return ret;
		}
	}

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
			      &hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
			      &hdev->rst_service_task);
}

void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
		hdev->hw_stats.stats_timer++;
		hdev->fd_arfs_expire_timer++;
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    system_wq, &hdev->service_task,
				    delay_time);
	}
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	unsigned int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			hclge_config_mac_tnl_int(hdev, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}

static void hclge_update_port_capability(struct hclge_mac *mac)
{
	/* update fec ability by speed */
	hclge_convert_setting_fec(mac);

	/* firmware cannot identify the backplane type; the media type
	 * read from the configuration can help to deal with it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		linkmode_zero(mac->advertising);
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}

static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;

	resp->query_type = QUERY_ACTIVE_SPEED;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}

	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */
	if (resp->speed_ability) {
		mac->module_type = le32_to_cpu(resp->module_type);
		mac->speed_ability = le32_to_cpu(resp->speed_ability);
		mac->autoneg = resp->autoneg;
		mac->support_autoneg = resp->autoneg_ability;
		mac->speed_type = QUERY_ACTIVE_SPEED;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}

static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed = HCLGE_MAC_SPEED_UNKNOWN;
	int ret;

	/* get the port info from SFP cmd if not copper port */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return 0;

	/* if IMP does not support get SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->pdev->revision >= 0x21)
		ret = hclge_get_sfp_info(hdev, mac);
	else
		ret = hclge_get_sfp_speed(hdev, &speed);

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->pdev->revision >= 0x21) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(mac);
			return 0;
		}
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL);
	} else {
		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
			return 0; /* do nothing if no SFP */

		/* must config full duplex for SFP */
		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
	}
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

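/* Event decoding priority for the shared vector 0: reset sources first
 * (IMP, then global), then MSI-X hardware-error sources, then the
 * mailbox (CMDQ RX) event; anything else falls through as "other".
 * Only the highest-priority pending event is reported per invocation.
 */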
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process the reset event in this go and
	 * will defer the processing of the mailbox events. Since we would not
	 * have cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
			 msix_src_reg);
		*clearval = msix_src_reg;
		return HCLGE_VECTOR0_EVENT_ERR;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
		 cmdq_src_reg, msix_src_reg);
	*clearval = msix_src_reg;

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.*/
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
		 *    will fetch the correct type of reset. This would be done
		 *    by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	hclge_clear_event_cause(hdev, event_cause, clearval);

	/* Enable interrupt if it is not caused by reset. And when
	 * clearval equals 0, it means the interrupt status may have been
	 * cleared by hardware before the driver reads the status register.
	 * For this case, vector0 interrupt also should be enabled.
	 */
	if (!clearval ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
				      const cpumask_t *mask)
{
	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
					      affinity_notify);

	cpumask_copy(&hdev->affinity_mask, mask);
}

static void hclge_irq_affinity_release(struct kref *ref)
{
}

static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
			      &hdev->affinity_mask);

	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
	hdev->affinity_notify.release = hclge_irq_affinity_release;
	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
				  &hdev->affinity_notify);
}

static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WATI_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_sync_cmd *req;
	struct hclge_desc desc;
	int cnt = 0;
	int ret;

	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for VFs to stop their IO
		 */
		if (ret == -EOPNOTSUPP) {
			msleep(HCLGE_RESET_SYNC_TIME);
			return 0;
		} else if (ret) {
			dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
				ret);
			return ret;
		} else if (req->all_vf_ready) {
			return 0;
		}
		msleep(HCLGE_PF_RESET_SYNC_TIME);
		hclge_cmd_reuse_desc(&desc, true);
	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);

	dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
	return -ETIME;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset not finished\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * the new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after hardware reset done
	 */
	if (hdev->pdev->revision == 0x20)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
				clearval);

	hclge_enable_vector(&hdev->misc_vector, true);
}

static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}

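/* Reset handshake with the IMP firmware: the driver advertises that it
 * is ready for the hardware reset to proceed by setting the
 * HCLGE_NIC_SW_RST_RDY bit in the CSQ depth register, and clears the bit
 * again once re-initialization has finished.
 */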
static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGE_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGE_NIC_SW_RST_RDY;

	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* confirm that all running VFs are ready
		 * before requesting PF reset
		 */
		ret = hclge_func_reset_sync_vf(hdev);
		if (ret)
			return ret;

		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		/* confirm that all running VFs are ready
		 * before requesting PF reset
		 */
		ret = hclge_func_reset_sync_vf(hdev);
		if (ret)
			return ret;

		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%d)\n",
			 hdev->reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);

	/* recover the handshake status when reset fails */
	hclge_reset_handshake(hdev, true);

	dev_err(&hdev->pdev->dev, "Reset fail!\n");

	return false;
}

static int hclge_set_rst_done(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_done_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_pf_rst_done_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	case HNAE3_GLOBAL_RESET:
		/* fall through */
	case HNAE3_IMP_RESET:
		ret = hclge_set_rst_done(hdev);
		break;
	default:
		break;
	}

	/* clear up the handshake status after re-initialize done */
	hclge_reset_handshake(hdev, false);

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

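/* Full reset sequence: notify clients DOWN (RoCE first, then NIC under
 * rtnl_lock), assert or wait out the hardware reset, rebuild the stack
 * via hclge_reset_stack(), then bring clients back UP in reverse order.
 * Any failure funnels into hclge_reset_err_handle(), which decides
 * whether to retry by re-scheduling the reset task.
 */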
static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();

	ret = hclge_reset_stack(hdev);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		goto err_reset;

	rtnl_lock();

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible. since some errors
	 * need this kind of reset to fix.
	 */
	hdev->reset_level = hclge_get_reset_level(ae_dev,
						  &hdev->default_reset_request);
	if (hdev->reset_level != HNAE3_NONE_RESET)
		set_bit(hdev->reset_level, &hdev->reset_request);

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one then we want to make sure we
	 * throttle the reset request. Therefore, we will not allow it again
	 * before 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	} else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully completed the reset, then we can proceed with driver
	 *       and client reinitialization.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_port_info(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_sync_vlan_filter(hdev);
	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
		hclge_rfs_filter_expire(hdev);
		hdev->fd_arfs_expire_timer = 0;
	}

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

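/* Vector allocation for enet/RoCE rings: slot 0 is reserved for the misc
 * (mailbox/reset) interrupt, so the search below starts at index 1 and
 * hands out free slots, recording the owning vport in vector_status[]
 * and the Linux IRQ number in vector_irq[].
 */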
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

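/* The RSS hash key is longer than one command descriptor can carry, so
 * it is written in HCLGE_RSS_HASH_KEY_NUM-byte chunks; each descriptor
 * carries the hash algorithm plus a key_offset telling the firmware
 * where the chunk belongs within the key.
 */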
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_RSS_KEY_SIZE;
	req = (struct hclge_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user-specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

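/* tc_size written to hardware is log2(roundup_pow_of_two(rss_size)).
 * For example, rss_size = 16 gives tc_size = 4, and rss_size = 24 is
 * first rounded up to 32 and then gives tc_size = 5; the effective queue
 * count is still bounded by the contents of the indirection table.
 */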
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;
	int ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

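/* Ring-to-vector (un)mapping sends one add/del command per
 * HCLGE_VECTOR_ELEMENTS_PER_CMD rings: the loop below packs each ring's
 * type, TQP id and GL index into tqp_type_and_id[] and flushes a full
 * descriptor whenever it fills up, then sends the final partial one.
 */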
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision(0x20); newer revisions support them. Setting
	 * these two fields does not return an error when the driver sends
	 * the command to firmware in revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}

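/* Flow director: rules live in a stage-1 TCAM. Entries come either from
 * ethtool -N (HCLGE_FD_EP_ACTIVE) or from aRFS (HCLGE_FD_ARFS_ACTIVE);
 * the two sources do not mix, and the rule list plus the location bitmap
 * are protected by fd_rule_lock.
 */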
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If using the max 400-bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

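/* A TCAM entry spans three command descriptors: the key bytes are split
 * across the tcam_data areas of the three requests and written with a
 * single multi-descriptor command. Passing key == NULL writes an empty
 * (invalidated) entry, which is how entries are deleted.
 */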
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

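/* Convert one tuple of a rule into its TCAM key pair. The data/mask
 * values from the rule are folded into key_x/key_y via calc_x()/calc_y();
 * a tuple flagged in rule->unused_tuple still returns true so the caller
 * advances past its (zeroed) key region, while an inactive tuple_bit of 0
 * returns false and consumes no key space.
 */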
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	unsigned int i;
	int ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0 ; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);

	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

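/* Validate an ethtool flow spec against what the stage-1 key supports.
 * Every tuple the user left unspecified is flagged in *unused so that
 * hclge_fd_convert_tuple() skips it when building the TCAM key.
 */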
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		/* check whether src/dst ip address used */
		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		/* check whether src/dst ip address used */
		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}

static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}

/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}

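/* Translate an ethtool_rx_flow_spec into the driver's tuple/mask
 * representation; the L4 protocol and ether type are filled in
 * implicitly from the flow type.
 */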
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* it will never fail here, so there is no need to check the
	 * return value
	 */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}

static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* to avoid rule conflict, when the user configures a rule by
	 * ethtool, we need to clear all arfs rules
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, should not restore it when reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		memcpy(tuples->src_ip,
		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
		       sizeof(tuples->src_ip));
		memcpy(tuples->dst_ip,
		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
		       sizeof(tuples->dst_ip));
	}
}

/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}

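/* aRFS entry point, called with the flow's dissected keys. It reuses an
 * existing rule with identical tuples when possible; otherwise it takes
 * the first free TCAM location. The rule is allocated with GFP_ATOMIC
 * because fd_rule_lock, a BH spinlock, is held at that point.
 */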
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* when an fd rule added by the user already exists, arfs should
	 * not work
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* check whether a flow director filter exists for this flow;
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}

static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}

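/* Enable or disable the MAC. When enabling, TX/RX, padding, FCS
 * insertion/stripping and oversize truncation are all switched on in a
 * single command; when disabling, the bits are simply left cleared.
 */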
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);

	return ret;
}

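/* Serdes (serial or parallel) internal loopback. After issuing the set
 * command, the command result is polled until firmware reports
 * completion, and then the MAC link state is polled until it matches
 * the requested mode.
 */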
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

#define HCLGE_MAC_LINK_STATUS_MS   10
#define HCLGE_MAC_LINK_STATUS_NUM  100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP   1

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
	} else {
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);

	i = 0;
	do {
		/* serdes internal loopback, independent of the network cable */
		msleep(HCLGE_MAC_LINK_STATUS_MS);
		ret = hclge_get_mac_link_status(hdev);
		if (ret == mac_link_ret)
			return 0;
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");

	return -EBUSY;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
	} else {
		/* Set the DOWN flag here to disable the service to be
		 * scheduled again
		 */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);
		cancel_delayed_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;
	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return 0;
		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
			return -ENOSPC;
		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
			return -ENOSPC;
		}

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

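/* Fill a mac_vlan table entry from a MAC address. The address bytes are
 * packed little-endian into the hi32/lo16 fields, e.g. (illustrative)
 * 00:11:22:33:44:55 yields mac_addr_hi32 = 0x33221100 and
 * mac_addr_lo16 = 0x5544.
 */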
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
6667 static int hclge_init_umv_space(struct hclge_dev *hdev)
6669 u16 allocated_size = 0;
6672 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6677 if (allocated_size < hdev->wanted_umv_size)
6678 dev_warn(&hdev->pdev->dev,
6679 "Alloc umv space failed, want %d, get %d\n",
6680 hdev->wanted_umv_size, allocated_size);
6682 mutex_init(&hdev->umv_mutex);
6683 hdev->max_umv_size = allocated_size;
6684 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6685 * preserve some unicast mac vlan table entries shared by pf
6688 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6689 hdev->share_umv_size = hdev->priv_umv_size +
6690 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6695 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6699 if (hdev->max_umv_size > 0) {
6700 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6704 hdev->max_umv_size = 0;
6706 mutex_destroy(&hdev->umv_mutex);
6711 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6712 u16 *allocated_size, bool is_alloc)
6714 struct hclge_umv_spc_alc_cmd *req;
6715 struct hclge_desc desc;
6718 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6719 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6721 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6723 req->space_size = cpu_to_le32(space_size);
6725 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6727 dev_err(&hdev->pdev->dev,
6728 "%s umv space failed for cmd_send, ret =%d\n",
6729 is_alloc ? "allocate" : "free", ret);
6733 if (is_alloc && allocated_size)
6734 *allocated_size = le32_to_cpu(desc.data[1]);
6739 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6741 struct hclge_vport *vport;
6744 for (i = 0; i < hdev->num_alloc_vport; i++) {
6745 vport = &hdev->vport[i];
6746 vport->used_umv_num = 0;
6749 mutex_lock(&hdev->umv_mutex);
6750 hdev->share_umv_size = hdev->priv_umv_size +
6751 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6752 mutex_unlock(&hdev->umv_mutex);
6755 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6757 struct hclge_dev *hdev = vport->back;
6760 mutex_lock(&hdev->umv_mutex);
6761 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6762 hdev->share_umv_size == 0);
6763 mutex_unlock(&hdev->umv_mutex);
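/* Account a unicast entry against the vport's private quota first, then
 * against the shared pool; freeing reverses the accounting.
 */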
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr, is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is nonexistent. A repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in the mta table, but it cannot be
		 * deleted here because an entry of mta represents an address
		 * range rather than a specific address. The delete action to
		 * all entries will take effect in update_mta_status called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
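/* The following helpers keep a per-vport software list of MAC addresses
 * mirroring the hardware table, so entries can be restored or cleaned up
 * after a reset or when a VF is destroyed.
 */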
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}

void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		/* compare all six bytes: MAC addresses may contain zero
		 * bytes, so strncmp() could stop comparing too early
		 */
		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}

void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			 new_addr);
		return -EINVAL;
	}

	if ((!is_first || is_kdump_kernel()) &&
	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
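/* Program the VF VLAN filter. The VF bitmap covers more bytes than one
 * descriptor's data area can hold, so two chained descriptors are used;
 * vf_byte_off selects which descriptor carries this function's bit.
 */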
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	/* if vf vlan table is full, firmware will close vf vlan filter, it
	 * is unable and unnecessary to add new vlan id to vf vlan filter
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
		return 0;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, avoid massive verbose
		 * print logs when unload.
		 */
		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}

static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20), newer revisions support them,
	 * these two fields can not be configured by user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
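/* Like the MAC list above, each vport keeps a software VLAN list so that
 * filter entries can be replayed into hardware after a reset or when
 * port based VLAN is disabled.
 */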
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
static void hclge_restore_vlan_table(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	u16 vlan_proto, qos;
	u16 state, vlan_id;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
		qos = vport->port_base_vlan_cfg.vlan_info.qos;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						 vport->vport_id, vlan_id, qos,
						 false);
			continue;
		}

		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			if (vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan->vlan_id, 0,
							 false);
		}
	}

	mutex_unlock(&hdev->vport_cfg_mutex);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}
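/* Decide how a requested port based VLAN differs from the current one:
 * no change, enable, disable, or modify an existing tag.
 */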
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;

		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan)
		return HNAE3_PORT_BASE_VLAN_DISABLE;
	else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id,
							state, vlan, qos,
							ntohs(proto));
		return ret;
	}
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When device is resetting, firmware is unable to handle
	 * mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan enabled, we use port base vlan as the vlan
	 * filter entry. In this case, we don't update vlan filter table
	 * when user add new vlan or remove exist vlan, just update the vport
	 * vlan list. The vlan id in vlan list will be written into the vlan
	 * filter table when port base vlan is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	} else if (is_kill) {
		/* when removing hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to be consistent
		 * with the stack
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}

	return ret;
}
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* start from vport 1 for PF is always alive */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       0, true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
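/* The hardware works on maximum frame size (MPS) rather than MTU, so the
 * requested MTU is converted below: MPS = MTU + Ethernet header + FCS +
 * two VLAN tags (for possible double tagging).
 */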
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
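/* TQP reset handshake: disable the queue, assert the reset request, poll
 * the ready bit until hardware reports the reset has completed, then
 * deassert the request.
 */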
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}

int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%d) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}
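/* Main PF initialization path: bring up PCI, the command queue and MSI-X,
 * then allocate TQPs/vports and configure MAC, VLAN, TM, RSS and the flow
 * director before arming the reset timer and service tasks.
 */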
8836 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8838 struct pci_dev *pdev = ae_dev->pdev;
8839 struct hclge_dev *hdev;
8842 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* default MPS leaves room for two VLAN tags on top of the L2 frame */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on one PF would disturb the pending initialization
	 * of the other PFs.
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

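/* Zero the locally cached hardware statistics; called on the reset path so
 * that counters from before the reset are not reported as current.
 */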
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

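/* Re-initialize the device after a reset. Unlike hclge_init_ae_dev(), this
 * path reuses the TQP, vport and IRQ resources allocated at probe time and
 * only re-programs the hardware state (command queue, MAC, VLAN, TM, RSS
 * and flow director configuration).
 */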
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

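/* Teardown roughly mirrors the init order: quiesce the service tasks,
 * disable and synchronize the misc vector before its IRQ is freed, then
 * mask the remaining hardware error interrupts and release PCI resources.
 */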
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

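/* The maximum number of combined channels a user may request is bounded
 * both by the RSS engine size and by the TQPs available per TC on this
 * vport.
 */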
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

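/* Change the number of combined channels: update the TM vport mapping for
 * the requested RSS size, re-program the RSS TC mode, and rebuild the RSS
 * indirection table unless the user has configured one explicitly.
 */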
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

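/* Query from firmware how many 32-bit and 64-bit registers the subsequent
 * dump commands will return; both counts come back in a single descriptor.
 */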
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

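/* Read the 32-bit register dump from firmware. Only the first descriptor of
 * the reply keeps its header words, so it carries fewer data words than the
 * following descriptors, which are packed entirely with register values;
 * the descriptor count below accounts for that difference.
 */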
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30

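/* The register dump is laid out in lines of REG_NUM_PER_LINE u32 values;
 * each block of registers is padded up to a line boundary with
 * SEPARATOR_VALUE markers so consumers can tell the blocks apart.
 */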
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	/* prepare 4 commands to query DFX BD number */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);
}

static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
#define HCLGE_DFX_REG_BD_NUM	4

	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

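/* Send a multi-BD DFX query: every descriptor except the last carries
 * HCLGE_CMD_FLAG_NEXT so firmware treats the chain as one command.
 */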
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}

static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, data_len, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}

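/* Copy out the registers that are read directly over PCIe rather than via
 * firmware: command queue, common, per-ring (strided by 0x200) and per-TQP
 * interrupt (strided by 0x4) register blocks, each padded with separators.
 */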
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetching per-PF registers values from PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}

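/* Size the ethtool register dump. The ethtool core allocates the dump
 * buffer from this length, so it must match exactly what hclge_get_regs()
 * will write, including the separator padding of every block.
 */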
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);