// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
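
/* The MAC statistics can be read from the firmware in two ways: the
 * original fixed-length command (HCLGE_OPC_STATS_MAC, 21 descriptors)
 * handled by hclge_mac_update_stats_defective() below, and the newer
 * variable-length command (HCLGE_OPC_STATS_MAC_ALL) handled by
 * hclge_mac_update_stats_complete(), which first queries the number of
 * statistics registers. hclge_mac_update_stats() prefers the newer
 * method and falls back to the old one when the firmware returns
 * -EOPNOTSUPP.
 */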
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
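
/* Note: the descriptor count above follows the formula
 * 1 + ceil((reg_num - 3) / 4), which implies that the first descriptor
 * carries three 64-bit counters alongside the command head and every
 * later descriptor is reused wholesale as payload for four more; see
 * the HCLGE_RD_FIRST_STATS_NUM / HCLGE_RD_OTHER_STATS_NUM handling in
 * the update functions above.
 */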
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
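
/* The TX counters are emitted before the RX counters below; this order
 * must stay in sync with hclge_tqps_get_strings(), which generates the
 * matching "txqN_pktnum_rcd" / "rxqN_pktnum_rcd" ethtool string names.
 */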
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all MAC modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
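
/* Firmware speed codes, as consumed here and programmed back by
 * hclge_cfg_mac_speed_dup_hw() below: 0 = 1G, 1 = 10G, 2 = 25G,
 * 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
 */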
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
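
/* FEC ability depends on the MAC speed: 10G/40G links advertise BASE-R
 * FEC, 25G/50G advertise both BASE-R and RS-FEC, and 100G advertises
 * RS-FEC only; all of them also report automatic FEC selection
 * (HNAE3_FEC_AUTO), as set up in the function below.
 */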
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is sent to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is unit by 128 byte */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
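
/* The function below checks whether the remaining packet buffer can
 * hold the shared buffer: the shared standard is the larger of a fixed
 * minimum (2 * MPS + dv_buf_size with DCB, or MPS +
 * HCLGE_NON_DCB_ADDITIONAL_BUF + dv_buf_size without) and
 * (tc_num + 1) * MPS, rounded up to HCLGE_BUF_SIZE_UNIT. On success it
 * also fills in the shared buffer waterlines and per-TC thresholds.
 */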
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 *
 * The strategies are tried in order: private buffers only, all TCs with
 * maximum watermarks, all TCs with minimum watermarks, then dropping the
 * private buffers of non-PFC TCs and finally of PFC TCs until everything
 * fits.
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
2220 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2222 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2223 duplex = HCLGE_MAC_FULL;
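/* Firmware speed encoding used by HCLGE_OPC_CONFIG_SPEED_DUP below:
 * 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7. Duplex has
 * already been normalized by hclge_check_speed_dup(): only 10M/100M may
 * run half duplex.
 */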
2228 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2231 struct hclge_config_mac_speed_dup_cmd *req;
2232 struct hclge_desc desc;
2235 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2240 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2243 case HCLGE_MAC_SPEED_10M:
2244 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2245 HCLGE_CFG_SPEED_S, 6);
2247 case HCLGE_MAC_SPEED_100M:
2248 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2249 HCLGE_CFG_SPEED_S, 7);
2251 case HCLGE_MAC_SPEED_1G:
2252 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2253 HCLGE_CFG_SPEED_S, 0);
2255 case HCLGE_MAC_SPEED_10G:
2256 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2257 HCLGE_CFG_SPEED_S, 1);
2259 case HCLGE_MAC_SPEED_25G:
2260 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2261 HCLGE_CFG_SPEED_S, 2);
2263 case HCLGE_MAC_SPEED_40G:
2264 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2265 HCLGE_CFG_SPEED_S, 3);
2267 case HCLGE_MAC_SPEED_50G:
2268 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2269 HCLGE_CFG_SPEED_S, 4);
2271 case HCLGE_MAC_SPEED_100G:
2272 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2273 HCLGE_CFG_SPEED_S, 5);
2276 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2280 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2283 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2285 dev_err(&hdev->pdev->dev,
2286 "mac speed/duplex config cmd failed %d.\n", ret);
2293 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2297 duplex = hclge_check_speed_dup(duplex, speed);
2298 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2301 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2305 hdev->hw.mac.speed = speed;
2306 hdev->hw.mac.duplex = duplex;
2311 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2314 struct hclge_vport *vport = hclge_get_vport(handle);
2315 struct hclge_dev *hdev = vport->back;
2317 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2320 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2322 struct hclge_config_auto_neg_cmd *req;
2323 struct hclge_desc desc;
2327 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2329 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2331 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2332 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2334 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2336 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2342 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2344 struct hclge_vport *vport = hclge_get_vport(handle);
2345 struct hclge_dev *hdev = vport->back;
2347 if (!hdev->hw.mac.support_autoneg) {
2349 dev_err(&hdev->pdev->dev,
2350 "autoneg is not supported by current port\n");
2357 return hclge_set_autoneg_en(hdev, enable);
2360 static int hclge_get_autoneg(struct hnae3_handle *handle)
2362 struct hclge_vport *vport = hclge_get_vport(handle);
2363 struct hclge_dev *hdev = vport->back;
2364 struct phy_device *phydev = hdev->hw.mac.phydev;
2367 return phydev->autoneg;
2369 return hdev->hw.mac.autoneg;
2372 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2374 struct hclge_vport *vport = hclge_get_vport(handle);
2375 struct hclge_dev *hdev = vport->back;
2378 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2380 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2383 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2386 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2388 struct hclge_vport *vport = hclge_get_vport(handle);
2389 struct hclge_dev *hdev = vport->back;
2391 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2392 return hclge_set_autoneg_en(hdev, !halt);
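/* FEC configuration: AUTO is a standalone enable bit, while RS and
 * BaseR are encoded into the shared HCLGE_MAC_CFG_FEC_MODE field of the
 * HCLGE_OPC_CONFIG_FEC_MODE command.
 */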
2397 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2399 struct hclge_config_fec_cmd *req;
2400 struct hclge_desc desc;
2403 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2405 req = (struct hclge_config_fec_cmd *)desc.data;
2406 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2407 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2408 if (fec_mode & BIT(HNAE3_FEC_RS))
2409 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2410 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2411 if (fec_mode & BIT(HNAE3_FEC_BASER))
2412 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2413 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2415 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2417 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2422 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2424 struct hclge_vport *vport = hclge_get_vport(handle);
2425 struct hclge_dev *hdev = vport->back;
2426 struct hclge_mac *mac = &hdev->hw.mac;
2429 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2430 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2434 ret = hclge_set_fec_hw(hdev, fec_mode);
2438 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2442 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2445 struct hclge_vport *vport = hclge_get_vport(handle);
2446 struct hclge_dev *hdev = vport->back;
2447 struct hclge_mac *mac = &hdev->hw.mac;
2450 *fec_ability = mac->fec_ability;
2452 *fec_mode = mac->fec_mode;
2455 static int hclge_mac_init(struct hclge_dev *hdev)
2457 struct hclge_mac *mac = &hdev->hw.mac;
2460 hdev->support_sfp_query = true;
2461 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2462 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2463 hdev->hw.mac.duplex);
2465 dev_err(&hdev->pdev->dev,
2466 "Config mac speed dup fail ret=%d\n", ret);
2470 if (hdev->hw.mac.support_autoneg) {
2471 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2473 dev_err(&hdev->pdev->dev,
2474 "Config mac autoneg fail ret=%d\n", ret);
2481 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2482 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2484 dev_err(&hdev->pdev->dev,
2485 "Fec mode init fail, ret = %d\n", ret);
2490 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2492 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2496 ret = hclge_buffer_alloc(hdev);
2498 dev_err(&hdev->pdev->dev,
2499 "allocate buffer fail, ret=%d\n", ret);
2504 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2506 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2507 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2508 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2509 &hdev->mbx_service_task);
2512 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2514 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2515 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2516 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2517 &hdev->rst_service_task);
2520 static void hclge_task_schedule(struct hclge_dev *hdev)
2522 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2523 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2524 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2525 hdev->hw_stats.stats_timer++;
2526 hdev->fd_arfs_expire_timer++;
2527 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2528 system_wq, &hdev->service_task,
2529 round_jiffies_relative(HZ));
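/* The service task above re-arms itself roughly once per second
 * (round_jiffies_relative(HZ)); stats_timer and fd_arfs_expire_timer
 * are tick counters that hclge_service_task() compares against their
 * respective intervals.
 */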
2533 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2535 struct hclge_link_status_cmd *req;
2536 struct hclge_desc desc;
2540 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2541 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2543 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2548 req = (struct hclge_link_status_cmd *)desc.data;
2549 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2551 return !!link_status;
2554 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2556 unsigned int mac_state;
2559 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2562 mac_state = hclge_get_mac_link_status(hdev);
2564 if (hdev->hw.mac.phydev) {
2565 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2566 link_stat = mac_state &
2567 hdev->hw.mac.phydev->link;
2572 link_stat = mac_state;
2578 static void hclge_update_link_status(struct hclge_dev *hdev)
2580 struct hnae3_client *rclient = hdev->roce_client;
2581 struct hnae3_client *client = hdev->nic_client;
2582 struct hnae3_handle *rhandle;
2583 struct hnae3_handle *handle;
2589 state = hclge_get_mac_phy_link(hdev);
2590 if (state != hdev->hw.mac.link) {
2591 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2592 handle = &hdev->vport[i].nic;
2593 client->ops->link_status_change(handle, state);
2594 hclge_config_mac_tnl_int(hdev, state);
2595 rhandle = &hdev->vport[i].roce;
2596 if (rclient && rclient->ops->link_status_change)
2597 rclient->ops->link_status_change(rhandle,
2600 hdev->hw.mac.link = state;
2604 static void hclge_update_port_capability(struct hclge_mac *mac)
2606 /* update fec ability by speed */
2607 hclge_convert_setting_fec(mac);
2609 /* firmware can not identify the backplane type; the media type
2610 * read from the configuration can help handle it
2612 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2613 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2614 mac->module_type = HNAE3_MODULE_TYPE_KR;
2615 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2616 mac->module_type = HNAE3_MODULE_TYPE_TP;
2618 if (mac->support_autoneg) {
2619 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2620 linkmode_copy(mac->advertising, mac->supported);
2622 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2624 linkmode_zero(mac->advertising);
2628 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2630 struct hclge_sfp_info_cmd *resp;
2631 struct hclge_desc desc;
2634 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2635 resp = (struct hclge_sfp_info_cmd *)desc.data;
2636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2637 if (ret == -EOPNOTSUPP) {
2638 dev_warn(&hdev->pdev->dev,
2639 "IMP do not support get SFP speed %d\n", ret);
2642 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2646 *speed = le32_to_cpu(resp->speed);
2651 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2653 struct hclge_sfp_info_cmd *resp;
2654 struct hclge_desc desc;
2657 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2658 resp = (struct hclge_sfp_info_cmd *)desc.data;
2660 resp->query_type = QUERY_ACTIVE_SPEED;
2662 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2663 if (ret == -EOPNOTSUPP) {
2664 dev_warn(&hdev->pdev->dev,
2665 "IMP does not support get SFP info %d\n", ret);
2668 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2672 mac->speed = le32_to_cpu(resp->speed);
2673 /* if resp->speed_ability is 0, it means the firmware is an old
2674 * version that does not report these params, so do not update them
2676 if (resp->speed_ability) {
2677 mac->module_type = le32_to_cpu(resp->module_type);
2678 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2679 mac->autoneg = resp->autoneg;
2680 mac->support_autoneg = resp->autoneg_ability;
2681 mac->speed_type = QUERY_ACTIVE_SPEED;
2682 if (!resp->active_fec)
2685 mac->fec_mode = BIT(resp->active_fec);
2687 mac->speed_type = QUERY_SFP_SPEED;
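/* hclge_update_port_info(): refresh port info from firmware. Copper
 * ports are skipped. Revision 0x21+ uses the full SFP info query,
 * older hardware only queries the SFP speed; a -EOPNOTSUPP reply
 * clears support_sfp_query so the driver stops asking.
 */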
2693 static int hclge_update_port_info(struct hclge_dev *hdev)
2695 struct hclge_mac *mac = &hdev->hw.mac;
2696 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2699 /* get the port info from SFP cmd if not copper port */
2700 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2703 /* if IMP does not support getting SFP/qSFP info, return directly */
2704 if (!hdev->support_sfp_query)
2707 if (hdev->pdev->revision >= 0x21)
2708 ret = hclge_get_sfp_info(hdev, mac);
2710 ret = hclge_get_sfp_speed(hdev, &speed);
2712 if (ret == -EOPNOTSUPP) {
2713 hdev->support_sfp_query = false;
2719 if (hdev->pdev->revision >= 0x21) {
2720 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2721 hclge_update_port_capability(mac);
2724 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2727 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2728 return 0; /* do nothing if no SFP */
2730 /* must config full duplex for SFP */
2731 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2735 static int hclge_get_status(struct hnae3_handle *handle)
2737 struct hclge_vport *vport = hclge_get_vport(handle);
2738 struct hclge_dev *hdev = vport->back;
2740 hclge_update_link_status(hdev);
2742 return hdev->hw.mac.link;
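/* Event decode priority for vector 0: IMP reset first, then global
 * reset, then MSI-X errors, then mailbox (CMDQ RX); anything else is
 * reported as OTHER. Reset events also set HCLGE_STATE_CMD_DISABLE so
 * no new commands are issued while the hardware is resetting.
 */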
2745 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2747 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2749 /* fetch the events from their corresponding regs */
2750 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2751 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2752 msix_src_reg = hclge_read_dev(&hdev->hw,
2753 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2755 /* Assumption: If by any chance reset and mailbox events are reported
2756 * together then we will only process reset event in this go and will
2757 * defer the processing of the mailbox events. Since we would not have
2758 * cleared the RX CMDQ event this time, we would receive another
2759 * interrupt from H/W just for the mailbox.
2762 /* check for vector0 reset event sources */
2763 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2764 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2765 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2766 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2767 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2768 hdev->rst_stats.imp_rst_cnt++;
2769 return HCLGE_VECTOR0_EVENT_RST;
2772 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2773 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2774 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2775 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2776 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2777 hdev->rst_stats.global_rst_cnt++;
2778 return HCLGE_VECTOR0_EVENT_RST;
2781 /* check for vector0 msix event source */
2782 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2783 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2785 *clearval = msix_src_reg;
2786 return HCLGE_VECTOR0_EVENT_ERR;
2789 /* check for vector0 mailbox(=CMDQ RX) event source */
2790 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2791 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2792 *clearval = cmdq_src_reg;
2793 return HCLGE_VECTOR0_EVENT_MBX;
2796 /* print other vector0 event source */
2797 dev_info(&hdev->pdev->dev,
2798 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2799 cmdq_src_reg, msix_src_reg);
2800 *clearval = msix_src_reg;
2802 return HCLGE_VECTOR0_EVENT_OTHER;
2805 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2808 switch (event_type) {
2809 case HCLGE_VECTOR0_EVENT_RST:
2810 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2812 case HCLGE_VECTOR0_EVENT_MBX:
2813 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2820 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2822 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2823 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2824 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2825 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2826 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2829 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2831 writel(enable ? 1 : 0, vector->addr);
2834 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2836 struct hclge_dev *hdev = data;
2840 hclge_enable_vector(&hdev->misc_vector, false);
2841 event_cause = hclge_check_event_cause(hdev, &clearval);
2843 /* vector 0 interrupt is shared with reset and mailbox source events. */
2844 switch (event_cause) {
2845 case HCLGE_VECTOR0_EVENT_ERR:
2846 /* we do not know what type of reset is required now. This could
2847 * only be decided after we fetch the type of errors which
2848 * caused this event. Therefore, we will do below for now:
2849 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2850 * have deferred the type of reset to be used.
2851 * 2. Schedule the reset service task.
2852 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2853 * will fetch the correct type of reset. This would be done
2854 * by first decoding the types of errors.
2856 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2858 case HCLGE_VECTOR0_EVENT_RST:
2859 hclge_reset_task_schedule(hdev);
2861 case HCLGE_VECTOR0_EVENT_MBX:
2862 /* If we are here then,
2863 * 1. Either we are not handling any mbx task and we are not
2864 * scheduled as well,
2865 * OR
2866 * 2. We could be handling a mbx task but nothing more is
2867 * scheduled.
2868 * In both cases, we should schedule mbx task as there are more
2869 * mbx messages reported by this interrupt.
2871 hclge_mbx_task_schedule(hdev);
2874 dev_warn(&hdev->pdev->dev,
2875 "received unknown or unhandled event of vector0\n");
2879 /* clear the source of the interrupt if it is not caused by reset */
2881 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2882 hclge_clear_event_cause(hdev, event_cause, clearval);
2883 hclge_enable_vector(&hdev->misc_vector, true);
2889 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2891 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2892 dev_warn(&hdev->pdev->dev,
2893 "vector(vector_id %d) has been freed.\n", vector_id);
2897 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2898 hdev->num_msi_left += 1;
2899 hdev->num_msi_used -= 1;
2902 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2904 struct hclge_misc_vector *vector = &hdev->misc_vector;
2906 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2908 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2909 hdev->vector_status[0] = 0;
2911 hdev->num_msi_left -= 1;
2912 hdev->num_msi_used += 1;
2915 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
2916 const cpumask_t *mask)
2918 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
2921 cpumask_copy(&hdev->affinity_mask, mask);
2924 static void hclge_irq_affinity_release(struct kref *ref)
2928 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
2930 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
2931 &hdev->affinity_mask);
2933 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
2934 hdev->affinity_notify.release = hclge_irq_affinity_release;
2935 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
2936 &hdev->affinity_notify);
2939 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
2941 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
2942 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
2945 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2949 hclge_get_misc_vector(hdev);
2951 /* this would be explicitly freed in the end */
2952 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2953 0, "hclge_misc", hdev);
2955 hclge_free_vector(hdev, 0);
2956 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2957 hdev->misc_vector.vector_irq);
2963 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2965 free_irq(hdev->misc_vector.vector_irq, hdev);
2966 hclge_free_vector(hdev, 0);
2969 int hclge_notify_client(struct hclge_dev *hdev,
2970 enum hnae3_reset_notify_type type)
2972 struct hnae3_client *client = hdev->nic_client;
2975 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2978 if (!client->ops->reset_notify)
2981 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2982 struct hnae3_handle *handle = &hdev->vport[i].nic;
2985 ret = client->ops->reset_notify(handle, type);
2987 dev_err(&hdev->pdev->dev,
2988 "notify nic client failed %d(%d)\n", type, ret);
2996 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2997 enum hnae3_reset_notify_type type)
2999 struct hnae3_client *client = hdev->roce_client;
3003 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3006 if (!client->ops->reset_notify)
3009 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3010 struct hnae3_handle *handle = &hdev->vport[i].roce;
3012 ret = client->ops->reset_notify(handle, type);
3014 dev_err(&hdev->pdev->dev,
3015 "notify roce client failed %d(%d)",
3024 static int hclge_reset_wait(struct hclge_dev *hdev)
3026 #define HCLGE_RESET_WAIT_MS 100
3027 #define HCLGE_RESET_WAIT_CNT 200
3028 u32 val, reg, reg_bit;
3031 switch (hdev->reset_type) {
3032 case HNAE3_IMP_RESET:
3033 reg = HCLGE_GLOBAL_RESET_REG;
3034 reg_bit = HCLGE_IMP_RESET_BIT;
3036 case HNAE3_GLOBAL_RESET:
3037 reg = HCLGE_GLOBAL_RESET_REG;
3038 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3040 case HNAE3_FUNC_RESET:
3041 reg = HCLGE_FUN_RST_ING;
3042 reg_bit = HCLGE_FUN_RST_ING_B;
3044 case HNAE3_FLR_RESET:
3047 dev_err(&hdev->pdev->dev,
3048 "Wait for unsupported reset type: %d\n",
3053 if (hdev->reset_type == HNAE3_FLR_RESET) {
3054 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3055 cnt++ < HCLGE_RESET_WAIT_CNT)
3056 msleep(HCLGE_RESET_WAIT_MS);
3058 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3059 dev_err(&hdev->pdev->dev,
3060 "flr wait timeout: %d\n", cnt);
3067 val = hclge_read_dev(&hdev->hw, reg);
3068 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3069 msleep(HCLGE_RESET_WAIT_MS);
3070 val = hclge_read_dev(&hdev->hw, reg);
3074 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3075 dev_warn(&hdev->pdev->dev,
3076 "Wait for reset timeout: %d\n", hdev->reset_type);
3083 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3085 struct hclge_vf_rst_cmd *req;
3086 struct hclge_desc desc;
3088 req = (struct hclge_vf_rst_cmd *)desc.data;
3089 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3090 req->dest_vfid = func_id;
3095 return hclge_cmd_send(&hdev->hw, &desc, 1);
3098 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3102 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3103 struct hclge_vport *vport = &hdev->vport[i];
3106 /* Send cmd to set/clear VF's FUNC_RST_ING */
3107 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3109 dev_err(&hdev->pdev->dev,
3110 "set vf(%d) rst failed %d!\n",
3111 vport->vport_id, ret);
3115 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3118 /* Inform VF to process the reset.
3119 * hclge_inform_reset_assert_to_vf may fail if VF
3120 * driver is not loaded.
3122 ret = hclge_inform_reset_assert_to_vf(vport);
3124 dev_warn(&hdev->pdev->dev,
3125 "inform reset to vf(%d) failed %d!\n",
3126 vport->vport_id, ret);
3132 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3134 struct hclge_desc desc;
3135 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3138 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3139 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3140 req->fun_reset_vfid = func_id;
3142 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3144 dev_err(&hdev->pdev->dev,
3145 "send function reset cmd fail, status =%d\n", ret);
3150 static void hclge_do_reset(struct hclge_dev *hdev)
3152 struct hnae3_handle *handle = &hdev->vport[0].nic;
3153 struct pci_dev *pdev = hdev->pdev;
3156 if (hclge_get_hw_reset_stat(handle)) {
3157 dev_info(&pdev->dev, "Hardware reset not finished\n");
3158 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3159 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3160 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3164 switch (hdev->reset_type) {
3165 case HNAE3_GLOBAL_RESET:
3166 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3167 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3168 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3169 dev_info(&pdev->dev, "Global Reset requested\n");
3171 case HNAE3_FUNC_RESET:
3172 dev_info(&pdev->dev, "PF Reset requested\n");
3173 /* schedule again to check later */
3174 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3175 hclge_reset_task_schedule(hdev);
3177 case HNAE3_FLR_RESET:
3178 dev_info(&pdev->dev, "FLR requested\n");
3179 /* schedule again to check later */
3180 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3181 hclge_reset_task_schedule(hdev);
3184 dev_warn(&pdev->dev,
3185 "Unsupported reset type: %d\n", hdev->reset_type);
3190 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3191 unsigned long *addr)
3193 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3194 struct hclge_dev *hdev = ae_dev->priv;
3196 /* first, resolve any unknown reset type to the known type(s) */
3197 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3198 /* we will intentionally ignore any errors from this function
3199 * as we will end up in *some* reset request in any case
3201 hclge_handle_hw_msix_error(hdev, addr);
3202 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3203 /* We deferred the clearing of the error event which caused the
3204 * interrupt since it was not possible to do that in
3205 * interrupt context (and this is the reason we introduced the
3206 * new UNKNOWN reset type). Now that the errors have been
3207 * handled and cleared in hardware, we can safely enable
3208 * interrupts. This is an exception to the norm.
3210 hclge_enable_vector(&hdev->misc_vector, true);
3213 /* return the highest priority reset level amongst all */
3214 if (test_bit(HNAE3_IMP_RESET, addr)) {
3215 rst_level = HNAE3_IMP_RESET;
3216 clear_bit(HNAE3_IMP_RESET, addr);
3217 clear_bit(HNAE3_GLOBAL_RESET, addr);
3218 clear_bit(HNAE3_FUNC_RESET, addr);
3219 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3220 rst_level = HNAE3_GLOBAL_RESET;
3221 clear_bit(HNAE3_GLOBAL_RESET, addr);
3222 clear_bit(HNAE3_FUNC_RESET, addr);
3223 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3224 rst_level = HNAE3_FUNC_RESET;
3225 clear_bit(HNAE3_FUNC_RESET, addr);
3226 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3227 rst_level = HNAE3_FLR_RESET;
3228 clear_bit(HNAE3_FLR_RESET, addr);
3231 if (hdev->reset_type != HNAE3_NONE_RESET &&
3232 rst_level < hdev->reset_type)
3233 return HNAE3_NONE_RESET;
3238 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3242 switch (hdev->reset_type) {
3243 case HNAE3_IMP_RESET:
3244 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3246 case HNAE3_GLOBAL_RESET:
3247 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3256 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3257 hclge_enable_vector(&hdev->misc_vector, true);
3260 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3264 switch (hdev->reset_type) {
3265 case HNAE3_FUNC_RESET:
3267 case HNAE3_FLR_RESET:
3268 ret = hclge_set_all_vf_rst(hdev, true);
3277 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3279 #define HCLGE_RESET_SYNC_TIME 100
3284 switch (hdev->reset_type) {
3285 case HNAE3_FUNC_RESET:
3286 /* There is no mechanism for PF to know if VF has stopped IO,
3287 * so for now just wait 100 ms for VF to stop IO
3289 msleep(HCLGE_RESET_SYNC_TIME);
3290 ret = hclge_func_reset_cmd(hdev, 0);
3292 dev_err(&hdev->pdev->dev,
3293 "asserting function reset fail %d!\n", ret);
3297 /* After performing PF reset, it is not necessary to do the
3298 * mailbox handling or send any command to firmware, because
3299 * any mailbox handling or command to firmware is only valid
3300 * after hclge_cmd_init is called.
3302 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3303 hdev->rst_stats.pf_rst_cnt++;
3305 case HNAE3_FLR_RESET:
3306 /* There is no mechanism for PF to know if VF has stopped IO,
3307 * so for now just wait 100 ms for VF to stop IO
3309 msleep(HCLGE_RESET_SYNC_TIME);
3310 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3311 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3312 hdev->rst_stats.flr_rst_cnt++;
3314 case HNAE3_IMP_RESET:
3315 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3316 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3317 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3323 /* inform hardware that preparatory work is done */
3324 msleep(HCLGE_RESET_SYNC_TIME);
3325 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3326 HCLGE_NIC_CMQ_ENABLE);
3327 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3332 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3334 #define MAX_RESET_FAIL_CNT 5
3336 if (hdev->reset_pending) {
3337 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3338 hdev->reset_pending);
3340 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3341 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3342 BIT(HCLGE_IMP_RESET_BIT))) {
3343 dev_info(&hdev->pdev->dev,
3344 "reset failed because IMP Reset is pending\n");
3345 hclge_clear_reset_cause(hdev);
3347 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3348 hdev->reset_fail_cnt++;
3349 set_bit(hdev->reset_type, &hdev->reset_pending);
3350 dev_info(&hdev->pdev->dev,
3351 "re-schedule reset task(%d)\n",
3352 hdev->reset_fail_cnt);
3356 hclge_clear_reset_cause(hdev);
3357 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3361 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3365 switch (hdev->reset_type) {
3366 case HNAE3_FUNC_RESET:
3368 case HNAE3_FLR_RESET:
3369 ret = hclge_set_all_vf_rst(hdev, false);
3378 static int hclge_reset_stack(struct hclge_dev *hdev)
3382 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3386 ret = hclge_reset_ae_dev(hdev->ae_dev);
3390 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3394 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3397 static void hclge_reset(struct hclge_dev *hdev)
3399 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3402 /* Initialize ae_dev reset status as well, in case enet layer wants to
3403 * know if device is undergoing reset
3405 ae_dev->reset_type = hdev->reset_type;
3406 hdev->rst_stats.reset_cnt++;
3407 /* perform reset of the stack & ae device for a client */
3408 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3412 ret = hclge_reset_prepare_down(hdev);
3417 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3419 goto err_reset_lock;
3423 ret = hclge_reset_prepare_wait(hdev);
3427 if (hclge_reset_wait(hdev))
3430 hdev->rst_stats.hw_reset_done_cnt++;
3432 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3438 ret = hclge_reset_stack(hdev);
3440 goto err_reset_lock;
3442 hclge_clear_reset_cause(hdev);
3444 ret = hclge_reset_prepare_up(hdev);
3446 goto err_reset_lock;
3450 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3451 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 times */
3454 if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3459 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3461 goto err_reset_lock;
3465 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3469 hdev->last_reset_time = jiffies;
3470 hdev->reset_fail_cnt = 0;
3471 hdev->rst_stats.reset_done_cnt++;
3472 ae_dev->reset_type = HNAE3_NONE_RESET;
3473 del_timer(&hdev->reset_timer);
3480 if (hclge_reset_err_handle(hdev))
3481 hclge_reset_task_schedule(hdev);
3484 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3486 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3487 struct hclge_dev *hdev = ae_dev->priv;
3489 /* We might end up getting called broadly because of the 2 cases below:
3490 * 1. A recoverable error was conveyed through APEI and the only way to
3491 * bring back normalcy is to reset.
3492 * 2. A new reset request from the stack due to timeout
3494 * For the first case, the error event might not have an ae handle
3495 * available. Check if this is a new reset request and we are not here
3496 * just because the last reset attempt did not succeed and the watchdog
3497 * hit us again. We will know this if the last reset request did not
3498 * occur very recently (watchdog timer = 5*HZ; let us check after a
3499 * sufficiently large time, say 4*5*HZ).
3500 * In case of a new request we reset the "reset level" to PF reset.
3501 * And if it is a repeat reset request of the most recent one then we
3502 * want to throttle it: we will not allow it again within 3*HZ jiffies.
3505 handle = &hdev->vport[0].nic;
3507 if (time_before(jiffies, (hdev->last_reset_time +
3508 HCLGE_RESET_INTERVAL)))
3510 else if (hdev->default_reset_request)
3512 hclge_get_reset_level(ae_dev,
3513 &hdev->default_reset_request);
3514 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3515 hdev->reset_level = HNAE3_FUNC_RESET;
3517 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3520 /* request reset & schedule reset task */
3521 set_bit(hdev->reset_level, &hdev->reset_request);
3522 hclge_reset_task_schedule(hdev);
3524 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3525 hdev->reset_level++;
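/* Each reset event escalates reset_level by one step (capped at
 * HNAE3_GLOBAL_RESET), so a repeated request after a failed PF reset is
 * retried at a deeper reset level.
 */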
3528 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3529 enum hnae3_reset_type rst_type)
3531 struct hclge_dev *hdev = ae_dev->priv;
3533 set_bit(rst_type, &hdev->default_reset_request);
3536 static void hclge_reset_timer(struct timer_list *t)
3538 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3540 dev_info(&hdev->pdev->dev,
3541 "triggering reset in reset timer\n");
3542 hclge_reset_event(hdev->pdev, NULL);
3545 static void hclge_reset_subtask(struct hclge_dev *hdev)
3547 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3549 /* check if there is any ongoing reset in the hardware. This status can
3550 * be checked from reset_pending. If there is one, we need to wait for
3551 * the hardware to complete the reset:
3552 * a. If we can figure out in reasonable time that the hardware
3553 * has fully reset, then we can proceed with the driver and client
3554 * reset;
3555 * b. else, we come back later to check this status, so re-schedule now.
3558 hdev->last_reset_time = jiffies;
3559 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3560 if (hdev->reset_type != HNAE3_NONE_RESET)
3563 /* check if we got any *new* reset requests to be honored */
3564 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3565 if (hdev->reset_type != HNAE3_NONE_RESET)
3566 hclge_do_reset(hdev);
3568 hdev->reset_type = HNAE3_NONE_RESET;
3571 static void hclge_reset_service_task(struct work_struct *work)
3573 struct hclge_dev *hdev =
3574 container_of(work, struct hclge_dev, rst_service_task);
3576 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3579 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3581 hclge_reset_subtask(hdev);
3583 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3586 static void hclge_mailbox_service_task(struct work_struct *work)
3588 struct hclge_dev *hdev =
3589 container_of(work, struct hclge_dev, mbx_service_task);
3591 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3594 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3596 hclge_mbx_handler(hdev);
3598 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3601 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3605 /* start from vport 1, since the PF (vport 0) is always alive */
3606 for (i = 1; i < hdev->num_alloc_vport; i++) {
3607 struct hclge_vport *vport = &hdev->vport[i];
3609 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3610 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3612 /* If vf is not alive, set to default value */
3613 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3614 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3618 static void hclge_service_task(struct work_struct *work)
3620 struct hclge_dev *hdev =
3621 container_of(work, struct hclge_dev, service_task.work);
3623 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3625 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3626 hclge_update_stats_for_all(hdev);
3627 hdev->hw_stats.stats_timer = 0;
3630 hclge_update_port_info(hdev);
3631 hclge_update_link_status(hdev);
3632 hclge_update_vport_alive(hdev);
3633 hclge_sync_vlan_filter(hdev);
3634 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3635 hclge_rfs_filter_expire(hdev);
3636 hdev->fd_arfs_expire_timer = 0;
3639 hclge_task_schedule(hdev);
3642 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3644 /* VF handle has no client */
3645 if (!handle->client)
3646 return container_of(handle, struct hclge_vport, nic);
3647 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3648 return container_of(handle, struct hclge_vport, roce);
3650 return container_of(handle, struct hclge_vport, nic);
3653 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3654 struct hnae3_vector_info *vector_info)
3656 struct hclge_vport *vport = hclge_get_vport(handle);
3657 struct hnae3_vector_info *vector = vector_info;
3658 struct hclge_dev *hdev = vport->back;
3662 vector_num = min(hdev->num_msi_left, vector_num);
3664 for (j = 0; j < vector_num; j++) {
3665 for (i = 1; i < hdev->num_msi; i++) {
3666 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3667 vector->vector = pci_irq_vector(hdev->pdev, i);
3668 vector->io_addr = hdev->hw.io_base +
3669 HCLGE_VECTOR_REG_BASE +
3670 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3672 HCLGE_VECTOR_VF_OFFSET;
3673 hdev->vector_status[i] = vport->vport_id;
3674 hdev->vector_irq[i] = vector->vector;
3683 hdev->num_msi_left -= alloc;
3684 hdev->num_msi_used += alloc;
3689 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3693 for (i = 0; i < hdev->num_msi; i++)
3694 if (vector == hdev->vector_irq[i])
3700 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3702 struct hclge_vport *vport = hclge_get_vport(handle);
3703 struct hclge_dev *hdev = vport->back;
3706 vector_id = hclge_get_vector_index(hdev, vector);
3707 if (vector_id < 0) {
3708 dev_err(&hdev->pdev->dev,
3709 "Get vector index fail. vector_id =%d\n", vector_id);
3713 hclge_free_vector(hdev, vector_id);
3718 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3720 return HCLGE_RSS_KEY_SIZE;
3723 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3725 return HCLGE_RSS_IND_TBL_SIZE;
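/* The RSS hash key is larger than one descriptor can carry, so it is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes; hash_config holds
 * the hash algorithm in its low bits and the chunk offset starting at
 * bit HCLGE_RSS_HASH_KEY_OFFSET_B.
 */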
3728 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3729 const u8 hfunc, const u8 *key)
3731 struct hclge_rss_config_cmd *req;
3732 unsigned int key_offset = 0;
3733 struct hclge_desc desc;
3738 key_counts = HCLGE_RSS_KEY_SIZE;
3739 req = (struct hclge_rss_config_cmd *)desc.data;
3741 while (key_counts) {
3742 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3745 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3746 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3748 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3749 memcpy(req->hash_key,
3750 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3752 key_counts -= key_size;
3754 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3756 dev_err(&hdev->pdev->dev,
3757 "Configure RSS config fail, status = %d\n",
3765 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3767 struct hclge_rss_indirection_table_cmd *req;
3768 struct hclge_desc desc;
3772 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3774 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3775 hclge_cmd_setup_basic_desc
3776 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3778 req->start_table_index =
3779 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3780 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3782 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3783 req->rss_result[j] =
3784 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3786 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3788 dev_err(&hdev->pdev->dev,
3789 "Configure rss indir table fail,status = %d\n",
3797 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3798 u16 *tc_size, u16 *tc_offset)
3800 struct hclge_rss_tc_mode_cmd *req;
3801 struct hclge_desc desc;
3805 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3806 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3808 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3811 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3812 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3813 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3814 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3815 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3817 req->rss_tc_mode[i] = cpu_to_le16(mode);
3820 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3822 dev_err(&hdev->pdev->dev,
3823 "Configure rss tc mode fail, status = %d\n", ret);
3828 static void hclge_get_rss_type(struct hclge_vport *vport)
3830 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3831 vport->rss_tuple_sets.ipv4_udp_en ||
3832 vport->rss_tuple_sets.ipv4_sctp_en ||
3833 vport->rss_tuple_sets.ipv6_tcp_en ||
3834 vport->rss_tuple_sets.ipv6_udp_en ||
3835 vport->rss_tuple_sets.ipv6_sctp_en)
3836 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3837 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3838 vport->rss_tuple_sets.ipv6_fragment_en)
3839 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3841 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3844 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3846 struct hclge_rss_input_tuple_cmd *req;
3847 struct hclge_desc desc;
3850 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3852 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3854 /* Get the tuple cfg from pf */
3855 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3856 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3857 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3858 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3859 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3860 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3861 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3862 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3863 hclge_get_rss_type(&hdev->vport[0]);
3864 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3866 dev_err(&hdev->pdev->dev,
3867 "Configure rss input fail, status = %d\n", ret);
3871 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3874 struct hclge_vport *vport = hclge_get_vport(handle);
3877 /* Get hash algorithm */
3879 switch (vport->rss_algo) {
3880 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3881 *hfunc = ETH_RSS_HASH_TOP;
3883 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3884 *hfunc = ETH_RSS_HASH_XOR;
3887 *hfunc = ETH_RSS_HASH_UNKNOWN;
3892 /* Get the RSS Key required by the user */
3894 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3896 /* Get indirect table */
3898 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3899 indir[i] = vport->rss_indirection_tbl[i];
3904 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3905 const u8 *key, const u8 hfunc)
3907 struct hclge_vport *vport = hclge_get_vport(handle);
3908 struct hclge_dev *hdev = vport->back;
3912 /* Set the RSS Hash Key if specified by the user */
3915 case ETH_RSS_HASH_TOP:
3916 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3918 case ETH_RSS_HASH_XOR:
3919 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3921 case ETH_RSS_HASH_NO_CHANGE:
3922 hash_algo = vport->rss_algo;
3928 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3932 /* Update the shadow RSS key with the user specified key */
3933 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3934 vport->rss_algo = hash_algo;
3937 /* Update the shadow RSS table with user specified qids */
3938 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3939 vport->rss_indirection_tbl[i] = indir[i];
3941 /* Update the hardware */
3942 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3945 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3947 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3949 if (nfc->data & RXH_L4_B_2_3)
3950 hash_sets |= HCLGE_D_PORT_BIT;
3952 hash_sets &= ~HCLGE_D_PORT_BIT;
3954 if (nfc->data & RXH_IP_SRC)
3955 hash_sets |= HCLGE_S_IP_BIT;
3957 hash_sets &= ~HCLGE_S_IP_BIT;
3959 if (nfc->data & RXH_IP_DST)
3960 hash_sets |= HCLGE_D_IP_BIT;
3962 hash_sets &= ~HCLGE_D_IP_BIT;
3964 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3965 hash_sets |= HCLGE_V_TAG_BIT;
3970 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3971 struct ethtool_rxnfc *nfc)
3973 struct hclge_vport *vport = hclge_get_vport(handle);
3974 struct hclge_dev *hdev = vport->back;
3975 struct hclge_rss_input_tuple_cmd *req;
3976 struct hclge_desc desc;
3980 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3981 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3984 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3985 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3987 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3988 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3989 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3990 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3991 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3992 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3993 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3994 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3996 tuple_sets = hclge_get_rss_hash_bits(nfc);
3997 switch (nfc->flow_type) {
3999 req->ipv4_tcp_en = tuple_sets;
4002 req->ipv6_tcp_en = tuple_sets;
4005 req->ipv4_udp_en = tuple_sets;
4008 req->ipv6_udp_en = tuple_sets;
4011 req->ipv4_sctp_en = tuple_sets;
4014 if ((nfc->data & RXH_L4_B_0_1) ||
4015 (nfc->data & RXH_L4_B_2_3))
4018 req->ipv6_sctp_en = tuple_sets;
4021 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4024 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4030 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4032 dev_err(&hdev->pdev->dev,
4033 "Set rss tuple fail, status = %d\n", ret);
4037 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4038 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4039 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4040 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4041 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4042 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4043 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4044 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4045 hclge_get_rss_type(vport);
4049 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4050 struct ethtool_rxnfc *nfc)
4052 struct hclge_vport *vport = hclge_get_vport(handle);
4057 switch (nfc->flow_type) {
4059 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4062 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4065 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4068 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4071 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4074 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4078 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4087 if (tuple_sets & HCLGE_D_PORT_BIT)
4088 nfc->data |= RXH_L4_B_2_3;
4089 if (tuple_sets & HCLGE_S_PORT_BIT)
4090 nfc->data |= RXH_L4_B_0_1;
4091 if (tuple_sets & HCLGE_D_IP_BIT)
4092 nfc->data |= RXH_IP_DST;
4093 if (tuple_sets & HCLGE_S_IP_BIT)
4094 nfc->data |= RXH_IP_SRC;
4099 static int hclge_get_tc_size(struct hnae3_handle *handle)
4101 struct hclge_vport *vport = hclge_get_vport(handle);
4102 struct hclge_dev *hdev = vport->back;
4104 return hdev->rss_size_max;
4107 int hclge_rss_init_hw(struct hclge_dev *hdev)
4109 struct hclge_vport *vport = hdev->vport;
4110 u8 *rss_indir = vport[0].rss_indirection_tbl;
4111 u16 rss_size = vport[0].alloc_rss_size;
4112 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4113 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4114 u8 *key = vport[0].rss_hash_key;
4115 u8 hfunc = vport[0].rss_algo;
4116 u16 tc_valid[HCLGE_MAX_TC_NUM];
4121 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4125 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4129 ret = hclge_set_rss_input_tuple(hdev);
4133 /* Each TC has the same queue size. The tc_size set to hardware is
4134 * the log2 of rss_size rounded up to a power of two (e.g. rss_size 24
4135 * rounds up to 32, giving tc_size 5); the actual queue size is
4136 * limited by the indirection table.
4137 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4138 dev_err(&hdev->pdev->dev,
4139 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4144 roundup_size = roundup_pow_of_two(rss_size);
4145 roundup_size = ilog2(roundup_size);
4147 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4150 if (!(hdev->hw_tc_map & BIT(i)))
4154 tc_size[i] = roundup_size;
4155 tc_offset[i] = rss_size * i;
4158 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
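/* Default RSS indirection table: entry i maps to queue
 * (i % alloc_rss_size), e.g. with alloc_rss_size = 16 the entries cycle
 * through queues 0..15, spreading flows evenly across the allocated RSS
 * queues.
 */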
4161 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4163 struct hclge_vport *vport = hdev->vport;
4166 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4167 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4168 vport[j].rss_indirection_tbl[i] =
4169 i % vport[j].alloc_rss_size;
4173 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4175 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4176 struct hclge_vport *vport = hdev->vport;
4178 if (hdev->pdev->revision >= 0x21)
4179 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4181 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4182 vport[i].rss_tuple_sets.ipv4_tcp_en =
4183 HCLGE_RSS_INPUT_TUPLE_OTHER;
4184 vport[i].rss_tuple_sets.ipv4_udp_en =
4185 HCLGE_RSS_INPUT_TUPLE_OTHER;
4186 vport[i].rss_tuple_sets.ipv4_sctp_en =
4187 HCLGE_RSS_INPUT_TUPLE_SCTP;
4188 vport[i].rss_tuple_sets.ipv4_fragment_en =
4189 HCLGE_RSS_INPUT_TUPLE_OTHER;
4190 vport[i].rss_tuple_sets.ipv6_tcp_en =
4191 HCLGE_RSS_INPUT_TUPLE_OTHER;
4192 vport[i].rss_tuple_sets.ipv6_udp_en =
4193 HCLGE_RSS_INPUT_TUPLE_OTHER;
4194 vport[i].rss_tuple_sets.ipv6_sctp_en =
4195 HCLGE_RSS_INPUT_TUPLE_SCTP;
4196 vport[i].rss_tuple_sets.ipv6_fragment_en =
4197 HCLGE_RSS_INPUT_TUPLE_OTHER;
4199 vport[i].rss_algo = rss_algo;
4201 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4202 HCLGE_RSS_KEY_SIZE);
4205 hclge_rss_indir_init_cfg(hdev);
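/* hclge_bind_ring_with_vector() sends the ring-to-vector mapping in
 * batches: each descriptor carries up to HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring entries, a fresh descriptor is started whenever the current one
 * fills up, and any remainder is flushed after the loop.
 */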
4208 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4209 int vector_id, bool en,
4210 struct hnae3_ring_chain_node *ring_chain)
4212 struct hclge_dev *hdev = vport->back;
4213 struct hnae3_ring_chain_node *node;
4214 struct hclge_desc desc;
4215 struct hclge_ctrl_vector_chain_cmd *req
4216 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4217 enum hclge_cmd_status status;
4218 enum hclge_opcode_type op;
4219 u16 tqp_type_and_id;
4222 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4223 hclge_cmd_setup_basic_desc(&desc, op, false);
4224 req->int_vector_id = vector_id;
4227 for (node = ring_chain; node; node = node->next) {
4228 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4229 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4231 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4232 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4233 HCLGE_TQP_ID_S, node->tqp_index);
4234 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4236 hnae3_get_field(node->int_gl_idx,
4237 HNAE3_RING_GL_IDX_M,
4238 HNAE3_RING_GL_IDX_S));
4239 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4240 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4241 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4242 req->vfid = vport->vport_id;
4244 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4246 dev_err(&hdev->pdev->dev,
4247 "Map TQP fail, status is %d.\n",
4253 hclge_cmd_setup_basic_desc(&desc,
4256 req->int_vector_id = vector_id;
4261 req->int_cause_num = i;
4262 req->vfid = vport->vport_id;
4263 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4265 dev_err(&hdev->pdev->dev,
4266 "Map TQP fail, status is %d.\n", status);
4274 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4275 struct hnae3_ring_chain_node *ring_chain)
4277 struct hclge_vport *vport = hclge_get_vport(handle);
4278 struct hclge_dev *hdev = vport->back;
4281 vector_id = hclge_get_vector_index(hdev, vector);
4282 if (vector_id < 0) {
4283 dev_err(&hdev->pdev->dev,
4284 "Get vector index fail. vector_id =%d\n", vector_id);
4288 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4291 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4292 struct hnae3_ring_chain_node *ring_chain)
4294 struct hclge_vport *vport = hclge_get_vport(handle);
4295 struct hclge_dev *hdev = vport->back;
4298 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4301 vector_id = hclge_get_vector_index(hdev, vector);
4302 if (vector_id < 0) {
4303 dev_err(&handle->pdev->dev,
4304 "Get vector index fail. ret =%d\n", vector_id);
4308 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4310 dev_err(&handle->pdev->dev,
4311 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4317 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4318 struct hclge_promisc_param *param)
4320 struct hclge_promisc_cfg_cmd *req;
4321 struct hclge_desc desc;
4324 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4326 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4327 req->vf_id = param->vf_id;
4329 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4330 * pdev revision 0x20; newer revisions support them. Setting these
4331 * two fields does not cause an error when the driver sends the
4332 * command to firmware on revision 0x20.
4334 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4335 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4337 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4339 dev_err(&hdev->pdev->dev,
4340 "Set promisc mode fail, status is %d.\n", ret);
4345 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4346 bool en_mc, bool en_bc, int vport_id)
4351 memset(param, 0, sizeof(struct hclge_promisc_param));
4353 param->enable = HCLGE_PROMISC_EN_UC;
4355 param->enable |= HCLGE_PROMISC_EN_MC;
4357 param->enable |= HCLGE_PROMISC_EN_BC;
4358 param->vf_id = vport_id;
4361 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4364 struct hclge_vport *vport = hclge_get_vport(handle);
4365 struct hclge_dev *hdev = vport->back;
4366 struct hclge_promisc_param param;
4367 bool en_bc_pmc = true;
4369 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4370 * is always bypassed. So broadcast promisc should stay disabled until
4371 * the user enables promisc mode.
4373 if (handle->pdev->revision == 0x20)
4374 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4376 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4378 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4381 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4383 struct hclge_get_fd_mode_cmd *req;
4384 struct hclge_desc desc;
4387 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4389 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4391 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4393 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4397 *fd_mode = req->mode;
4402 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4403 u32 *stage1_entry_num,
4404 u32 *stage2_entry_num,
4405 u16 *stage1_counter_num,
4406 u16 *stage2_counter_num)
4408 struct hclge_get_fd_allocation_cmd *req;
4409 struct hclge_desc desc;
4412 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4414 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4416 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4418 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4423 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4424 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4425 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4426 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* With the max 400 bit key, we can also support tuples for the
	 * ether type and MAC addresses.
	 */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames,
	 * dst_vport is used to specify the rule's destination vport.
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

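/* Write one flow director TCAM entry. The (up to) 400 bit key is split
 * across three command descriptors chained with HCLGE_CMD_FLAG_NEXT;
 * sel_x selects whether the X or the Y half of the entry is written,
 * and is_add marks the entry valid (on the X half) or invalid.
 */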
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->next_input_key);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

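/* Convert one tuple of the rule into its TCAM X/Y representation.
 * With the calc_x()/calc_y() helpers defined earlier in this file,
 * each bit is encoded as an (X, Y) pair: roughly, X carries the
 * masked "match 0" bits and Y the masked "match 1" bits, while
 * X == Y == 0 means don't-care. Returns true when the tuple occupies
 * space in the key (even if left as don't-care), so the caller knows
 * to advance the key cursors; false when it occupies none.
 */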
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}

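/* Build the port number used in the key meta data: for HOST_PORT the
 * PF id and VF id are packed into their fields, for NETWORK_PORT the
 * physical port id is used; HCLGE_PORT_TYPE_B distinguishes the two.
 */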
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key consists of a meta data key and a tuple key.
 * The meta data key occupies the MSB region and the tuple key the
 * LSB region; unused bits are filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	unsigned int i;
	int ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0 ; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
	return ret;
}

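/* Set up the action data (AD) part of the rule: either drop the packet
 * or forward it to the chosen queue, and write the rule id into the RX
 * buffer descriptor so matched flows can be identified on the RX path.
 */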
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

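/* Validate an ethtool flow spec against what the flow director
 * supports and collect the tuples the user left unspecified in
 * *unused, so they are encoded as don't-care bits in the key.
 */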
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		/* check whether the src/dst ip address is used */
		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		/* check whether the src/dst ip address is used */
		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}

static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}

/* the caller must hold hdev->fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}

static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

/* the caller must hold hdev->fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* adding to the list never fails here, so there is no need to
	 * check the return value
	 */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}

static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* to avoid rule conflicts, clear all aRFS rules when the user
	 * configures a rule via ethtool
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, the rules should not be restored during reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		memcpy(tuples->src_ip,
		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
		       sizeof(tuples->src_ip));
		memcpy(tuples->dst_ip,
		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
		       sizeof(tuples->dst_ip));
	}
}

/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

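/* aRFS rules only match the tuples learned from the flow keys (ether
 * proto, ip proto, src/dst ip and dst port); everything else, including
 * the source port, is marked unused and the matched tuples get a full
 * mask.
 */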
static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}

static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* if a user-configured fd rule already exists, arfs must not
	 * take effect
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* check whether a flow director filter exists for this flow;
	 * if not, create a new one for it;
	 * if one exists with a different queue id, modify it;
	 * if one exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}

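/* Periodically drop aRFS rules whose flows have expired: collect them
 * on a private list under fd_rule_lock, then destroy the TCAM entries
 * outside the lock.
 */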
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

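/* Configure serdes (serial or parallel) internal loopback in firmware,
 * then poll the command result until the DONE bit is reported, and
 * finally wait for the MAC link to reach the state matching the
 * requested loopback setting.
 */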
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

#define HCLGE_MAC_LINK_STATUS_MS   10
#define HCLGE_MAC_LINK_STATUS_NUM  100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP   1

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
	} else {
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);

	i = 0;
	do {
		/* serdes internal loopback, independent of the network cable */
		msleep(HCLGE_MAC_LINK_STATUS_MS);
		ret = hclge_get_mac_link_status(hdev);
		if (ret == mac_link_ret)
			return 0;
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");

	return -EBUSY;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev);
	} else {
		/* Set the DOWN flag here to prevent the service task
		 * from being scheduled again
		 */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);
		cancel_delayed_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

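/* Translate the response code of a mac_vlan table command into an
 * errno, depending on which table operation (add/remove/lookup) was
 * issued; resp_code 1 means "entry not found" for remove and lookup.
 */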
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}

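/* The mc mac vlan table entry carries a per-function bitmap spread
 * over the command descriptors: function ids 0-191 live in desc[1],
 * the remaining ones in desc[2]; clr decides whether the function's
 * bit is cleared or set.
 */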
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

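/* Pack the MAC address into the table entry in the byte order the
 * hardware expects (bytes 0-3 in mac_addr_hi32, bytes 4-5 in
 * mac_addr_lo16) and flag the entry as multicast when requested.
 */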
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * preserve some unicast mac vlan table entries shared by the pf
	 * and its vfs.
	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}

static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	if (!is_alloc)
		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}

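/* Account a unicast MAC entry against the vport quota: each vport
 * first consumes its private share (priv_umv_size) and only then the
 * pool shared by all vports (share_umv_size); freeing reverses this.
 */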
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed
	 * in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

6705 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6706 const unsigned char *addr)
6708 struct hclge_vport *vport = hclge_get_vport(handle);
6710 return hclge_rm_uc_addr_common(vport, addr);
6713 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6714 const unsigned char *addr)
6716 struct hclge_dev *hdev = vport->back;
6717 struct hclge_mac_vlan_tbl_entry_cmd req;
6720 /* mac addr check */
6721 if (is_zero_ether_addr(addr) ||
6722 is_broadcast_ether_addr(addr) ||
6723 is_multicast_ether_addr(addr)) {
6724 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6729 memset(&req, 0, sizeof(req));
6730 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6731 hclge_prepare_mac_addr(&req, addr, false);
6732 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6734 hclge_update_umv_space(vport, true);
6739 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6740 const unsigned char *addr)
6742 struct hclge_vport *vport = hclge_get_vport(handle);
6744 return hclge_add_mc_addr_common(vport, addr);
6747 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6748 const unsigned char *addr)
6750 struct hclge_dev *hdev = vport->back;
6751 struct hclge_mac_vlan_tbl_entry_cmd req;
6752 struct hclge_desc desc[3];
6755 /* mac addr check */
6756 if (!is_multicast_ether_addr(addr)) {
6757 dev_err(&hdev->pdev->dev,
6758 "Add mc mac err! invalid mac:%pM.\n",
6762 memset(&req, 0, sizeof(req));
6763 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6764 hclge_prepare_mac_addr(&req, addr, true);
6765 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6767 /* This mac addr does not exist; add a new entry for it */
6768 memset(desc[0].data, 0, sizeof(desc[0].data));
6769 memset(desc[1].data, 0, sizeof(desc[0].data));
6770 memset(desc[2].data, 0, sizeof(desc[0].data));
6772 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6775 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6777 if (status == -ENOSPC)
6778 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6783 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6784 const unsigned char *addr)
6786 struct hclge_vport *vport = hclge_get_vport(handle);
6788 return hclge_rm_mc_addr_common(vport, addr);
6791 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6792 const unsigned char *addr)
6794 struct hclge_dev *hdev = vport->back;
6795 struct hclge_mac_vlan_tbl_entry_cmd req;
6796 enum hclge_cmd_status status;
6797 struct hclge_desc desc[3];
6799 /* mac addr check */
6800 if (!is_multicast_ether_addr(addr)) {
6801 dev_dbg(&hdev->pdev->dev,
6802 "Remove mc mac err! invalid mac:%pM.\n",
6807 memset(&req, 0, sizeof(req));
6808 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6809 hclge_prepare_mac_addr(&req, addr, true);
6810 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6812 /* This mac addr exists; remove this handle's VFID from it */
6813 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6817 if (hclge_is_all_function_id_zero(desc))
6818 /* All the vfids are zero, so this entry needs to be deleted */
6819 status = hclge_remove_mac_vlan_tbl(vport, &req);
6821 /* Not all the vfids are zero, so just update the vfid bitmap */
6822 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6825 /* Maybe this mac address is in the mta table, but it cannot be
6826 * deleted here because an mta entry represents an address
6827 * range rather than a specific address. The delete action for
6828 * all entries takes effect in update_mta_status, called by
6829 * hns3_nic_set_rx_mode.
6830 */
6837 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6838 enum HCLGE_MAC_ADDR_TYPE mac_type)
6840 struct hclge_vport_mac_addr_cfg *mac_cfg;
6841 struct list_head *list;
6843 if (!vport->vport_id)
6846 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6850 mac_cfg->hd_tbl_status = true;
6851 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6853 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6854 &vport->uc_mac_list : &vport->mc_mac_list;
6856 list_add_tail(&mac_cfg->node, list);
6859 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6861 enum HCLGE_MAC_ADDR_TYPE mac_type)
6863 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6864 struct list_head *list;
6865 bool uc_flag, mc_flag;
6867 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6868 &vport->uc_mac_list : &vport->mc_mac_list;
6870 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6871 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6873 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6874 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6875 if (uc_flag && mac_cfg->hd_tbl_status)
6876 hclge_rm_uc_addr_common(vport, mac_addr);
6878 if (mc_flag && mac_cfg->hd_tbl_status)
6879 hclge_rm_mc_addr_common(vport, mac_addr);
6881 list_del(&mac_cfg->node);
6888 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6889 enum HCLGE_MAC_ADDR_TYPE mac_type)
6891 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6892 struct list_head *list;
6894 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6895 &vport->uc_mac_list : &vport->mc_mac_list;
6897 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6898 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6899 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6901 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6902 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6904 mac_cfg->hd_tbl_status = false;
6906 list_del(&mac_cfg->node);
6912 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6914 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6915 struct hclge_vport *vport;
6918 mutex_lock(&hdev->vport_cfg_mutex);
6919 for (i = 0; i < hdev->num_alloc_vport; i++) {
6920 vport = &hdev->vport[i];
6921 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6922 list_del(&mac->node);
6926 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6927 list_del(&mac->node);
6931 mutex_unlock(&hdev->vport_cfg_mutex);
6934 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6935 u16 cmdq_resp, u8 resp_code)
6937 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6938 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6939 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6940 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6945 dev_err(&hdev->pdev->dev,
6946 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6951 switch (resp_code) {
6952 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6953 case HCLGE_ETHERTYPE_ALREADY_ADD:
6956 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6957 dev_err(&hdev->pdev->dev,
6958 "add mac ethertype failed for manager table overflow.\n");
6959 return_status = -EIO;
6961 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6962 dev_err(&hdev->pdev->dev,
6963 "add mac ethertype failed for key conflict.\n");
6964 return_status = -EIO;
6967 dev_err(&hdev->pdev->dev,
6968 "add mac ethertype failed for undefined, code=%d.\n",
6970 return_status = -EIO;
6973 return return_status;
6976 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6977 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6979 struct hclge_desc desc;
6984 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6985 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6987 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6989 dev_err(&hdev->pdev->dev,
6990 "add mac ethertype failed for cmd_send, ret =%d.\n",
6995 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6996 retval = le16_to_cpu(desc.retval);
6998 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7001 static int init_mgr_tbl(struct hclge_dev *hdev)
7006 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7007 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7009 dev_err(&hdev->pdev->dev,
7010 "add mac ethertype failed, ret =%d.\n",
7019 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7021 struct hclge_vport *vport = hclge_get_vport(handle);
7022 struct hclge_dev *hdev = vport->back;
7024 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7027 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7030 const unsigned char *new_addr = (const unsigned char *)p;
7031 struct hclge_vport *vport = hclge_get_vport(handle);
7032 struct hclge_dev *hdev = vport->back;
7035 /* mac addr check */
7036 if (is_zero_ether_addr(new_addr) ||
7037 is_broadcast_ether_addr(new_addr) ||
7038 is_multicast_ether_addr(new_addr)) {
7039 dev_err(&hdev->pdev->dev,
7040 "Change uc mac err! invalid mac:%p.\n",
7045 if ((!is_first || is_kdump_kernel()) &&
7046 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7047 dev_warn(&hdev->pdev->dev,
7048 "remove old uc mac address fail.\n");
7050 ret = hclge_add_uc_addr(handle, new_addr);
7052 dev_err(&hdev->pdev->dev,
7053 "add uc mac address fail, ret =%d.\n",
7057 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7058 dev_err(&hdev->pdev->dev,
7059 "restore uc mac address fail.\n");
7064 ret = hclge_pause_addr_cfg(hdev, new_addr);
7066 dev_err(&hdev->pdev->dev,
7067 "configure mac pause address fail, ret =%d.\n",
7072 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7077 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7080 struct hclge_vport *vport = hclge_get_vport(handle);
7081 struct hclge_dev *hdev = vport->back;
7083 if (!hdev->hw.mac.phydev)
7086 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7089 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7090 u8 fe_type, bool filter_en, u8 vf_id)
7092 struct hclge_vlan_filter_ctrl_cmd *req;
7093 struct hclge_desc desc;
7096 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7098 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7099 req->vlan_type = vlan_type;
7100 req->vlan_fe = filter_en ? fe_type : 0;
7103 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7105 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7111 #define HCLGE_FILTER_TYPE_VF 0
7112 #define HCLGE_FILTER_TYPE_PORT 1
7113 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7114 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7115 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7116 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7117 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7118 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7119 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7120 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7121 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7123 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7125 struct hclge_vport *vport = hclge_get_vport(handle);
7126 struct hclge_dev *hdev = vport->back;
7128 if (hdev->pdev->revision >= 0x21) {
7129 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7130 HCLGE_FILTER_FE_EGRESS, enable, 0);
7131 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7132 HCLGE_FILTER_FE_INGRESS, enable, 0);
7134 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7135 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7139 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7141 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7144 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7145 bool is_kill, u16 vlan, u8 qos,
7148 #define HCLGE_MAX_VF_BYTES 16
7149 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7150 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7151 struct hclge_desc desc[2];
7156 /* if the vf vlan table is full, firmware will close the vf vlan filter;
7157 * it is then pointless and unnecessary to add a new vlan id to it.
7158 */
7159 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7162 hclge_cmd_setup_basic_desc(&desc[0],
7163 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7164 hclge_cmd_setup_basic_desc(&desc[1],
7165 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7167 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7169 vf_byte_off = vfid / 8;
7170 vf_byte_val = 1 << (vfid % 8);
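/* e.g. vfid = 10 maps to byte 10 / 8 = 1 with bit value 1 << (10 % 8) = 0x04 */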
7172 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7173 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7175 req0->vlan_id = cpu_to_le16(vlan);
7176 req0->vlan_cfg = is_kill;
7178 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7179 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7181 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7183 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7185 dev_err(&hdev->pdev->dev,
7186 "Send vf vlan command fail, ret =%d.\n",
7192 #define HCLGE_VF_VLAN_NO_ENTRY 2
7193 if (!req0->resp_code || req0->resp_code == 1)
7196 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7197 set_bit(vfid, hdev->vf_vlan_full);
7198 dev_warn(&hdev->pdev->dev,
7199 "vf vlan table is full, vf vlan filter is disabled\n");
7203 dev_err(&hdev->pdev->dev,
7204 "Add vf vlan filter fail, ret =%d.\n",
7207 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7208 if (!req0->resp_code)
7211 /* vf vlan filter is disabled when the vf vlan table is full,
7212 * so a new vlan id will not have been added to the vf vlan table.
7213 * Just return 0 without a warning, to avoid a flood of verbose
7214 * prints at unload time.
7215 */
7216 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7219 dev_err(&hdev->pdev->dev,
7220 "Kill vf vlan filter fail, ret =%d.\n",
7227 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7228 u16 vlan_id, bool is_kill)
7230 struct hclge_vlan_filter_pf_cfg_cmd *req;
7231 struct hclge_desc desc;
7232 u8 vlan_offset_byte_val;
7233 u8 vlan_offset_byte;
7237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7239 vlan_offset_160 = vlan_id / 160;
7240 vlan_offset_byte = (vlan_id % 160) / 8;
7241 vlan_offset_byte_val = 1 << (vlan_id % 8);
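/*
 * The pf vlan filter is addressed in 160-id chunks. e.g. vlan_id = 1000
 * gives vlan_offset_160 = 6, vlan_offset_byte = (1000 % 160) / 8 = 5 and
 * vlan_offset_byte_val = 1 << (1000 % 8) = 0x01.
 */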
7243 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7244 req->vlan_offset = vlan_offset_160;
7245 req->vlan_cfg = is_kill;
7246 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7248 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7250 dev_err(&hdev->pdev->dev,
7251 "port vlan command, send fail, ret =%d.\n", ret);
7255 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7256 u16 vport_id, u16 vlan_id, u8 qos,
7259 u16 vport_idx, vport_num = 0;
7262 if (is_kill && !vlan_id)
7265 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7268 dev_err(&hdev->pdev->dev,
7269 "Set %d vport vlan filter config fail, ret =%d.\n",
7274 /* vlan 0 may be added twice when 8021q module is enabled */
7275 if (!is_kill && !vlan_id &&
7276 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7279 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7280 dev_err(&hdev->pdev->dev,
7281 "Add port vlan failed, vport %d is already in vlan %d\n",
7287 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7288 dev_err(&hdev->pdev->dev,
7289 "Delete port vlan failed, vport %d is not in vlan %d\n",
7294 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7297 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7298 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7304 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7306 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7307 struct hclge_vport_vtag_tx_cfg_cmd *req;
7308 struct hclge_dev *hdev = vport->back;
7309 struct hclge_desc desc;
7312 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7314 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7315 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7316 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7317 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7318 vcfg->accept_tag1 ? 1 : 0);
7319 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7320 vcfg->accept_untag1 ? 1 : 0);
7321 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7322 vcfg->accept_tag2 ? 1 : 0);
7323 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7324 vcfg->accept_untag2 ? 1 : 0);
7325 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7326 vcfg->insert_tag1_en ? 1 : 0);
7327 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7328 vcfg->insert_tag2_en ? 1 : 0);
7329 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7331 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7332 req->vf_bitmap[req->vf_offset] =
7333 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7335 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7337 dev_err(&hdev->pdev->dev,
7338 "Send port txvlan cfg command fail, ret =%d\n",
7344 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7346 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7347 struct hclge_vport_vtag_rx_cfg_cmd *req;
7348 struct hclge_dev *hdev = vport->back;
7349 struct hclge_desc desc;
7352 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7354 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7355 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7356 vcfg->strip_tag1_en ? 1 : 0);
7357 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7358 vcfg->strip_tag2_en ? 1 : 0);
7359 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7360 vcfg->vlan1_vlan_prionly ? 1 : 0);
7361 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7362 vcfg->vlan2_vlan_prionly ? 1 : 0);
7364 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7365 req->vf_bitmap[req->vf_offset] =
7366 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7368 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7370 dev_err(&hdev->pdev->dev,
7371 "Send port rxvlan cfg command fail, ret =%d\n",
7377 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7378 u16 port_base_vlan_state,
7383 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7384 vport->txvlan_cfg.accept_tag1 = true;
7385 vport->txvlan_cfg.insert_tag1_en = false;
7386 vport->txvlan_cfg.default_tag1 = 0;
7388 vport->txvlan_cfg.accept_tag1 = false;
7389 vport->txvlan_cfg.insert_tag1_en = true;
7390 vport->txvlan_cfg.default_tag1 = vlan_tag;
7393 vport->txvlan_cfg.accept_untag1 = true;
7395 /* accept_tag2 and accept_untag2 are not supported on
7396 * pdev revision(0x20); newer revisions support them, but
7397 * these two fields cannot be configured by the user.
7398 */
7399 vport->txvlan_cfg.accept_tag2 = true;
7400 vport->txvlan_cfg.accept_untag2 = true;
7401 vport->txvlan_cfg.insert_tag2_en = false;
7402 vport->txvlan_cfg.default_tag2 = 0;
7404 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7405 vport->rxvlan_cfg.strip_tag1_en = false;
7406 vport->rxvlan_cfg.strip_tag2_en =
7407 vport->rxvlan_cfg.rx_vlan_offload_en;
7409 vport->rxvlan_cfg.strip_tag1_en =
7410 vport->rxvlan_cfg.rx_vlan_offload_en;
7411 vport->rxvlan_cfg.strip_tag2_en = true;
7413 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7414 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
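/*
 * Summary of the rx strip policy above: with port based vlan disabled,
 * only tag2 is stripped, following rx_vlan_offload_en; with it enabled,
 * tag2 (presumably the inserted port vlan tag) is always stripped and
 * tag1 follows rx_vlan_offload_en.
 */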
7416 ret = hclge_set_vlan_tx_offload_cfg(vport);
7420 return hclge_set_vlan_rx_offload_cfg(vport);
7423 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7425 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7426 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7427 struct hclge_desc desc;
7430 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7431 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7432 rx_req->ot_fst_vlan_type =
7433 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7434 rx_req->ot_sec_vlan_type =
7435 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7436 rx_req->in_fst_vlan_type =
7437 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7438 rx_req->in_sec_vlan_type =
7439 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7441 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7443 dev_err(&hdev->pdev->dev,
7444 "Send rxvlan protocol type command fail, ret =%d\n",
7449 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7451 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7452 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7453 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7455 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7457 dev_err(&hdev->pdev->dev,
7458 "Send txvlan protocol type command fail, ret =%d\n",
7464 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7466 #define HCLGE_DEF_VLAN_TYPE 0x8100
7468 struct hnae3_handle *handle = &hdev->vport[0].nic;
7469 struct hclge_vport *vport;
7473 if (hdev->pdev->revision >= 0x21) {
7474 /* for revision 0x21, vf vlan filter is per function */
7475 for (i = 0; i < hdev->num_alloc_vport; i++) {
7476 vport = &hdev->vport[i];
7477 ret = hclge_set_vlan_filter_ctrl(hdev,
7478 HCLGE_FILTER_TYPE_VF,
7479 HCLGE_FILTER_FE_EGRESS,
7486 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7487 HCLGE_FILTER_FE_INGRESS, true,
7492 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7493 HCLGE_FILTER_FE_EGRESS_V1_B,
7499 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7501 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7502 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7503 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7504 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7505 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7506 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
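/* HCLGE_DEF_VLAN_TYPE (0x8100) is the standard 802.1Q TPID (ETH_P_8021Q). */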
7508 ret = hclge_set_vlan_protocol_type(hdev);
7512 for (i = 0; i < hdev->num_alloc_vport; i++) {
7515 vport = &hdev->vport[i];
7516 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7518 ret = hclge_vlan_offload_cfg(vport,
7519 vport->port_base_vlan_cfg.state,
7525 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7528 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7531 struct hclge_vport_vlan_cfg *vlan;
7533 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7537 vlan->hd_tbl_status = writen_to_tbl;
7538 vlan->vlan_id = vlan_id;
7540 list_add_tail(&vlan->node, &vport->vlan_list);
7543 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7545 struct hclge_vport_vlan_cfg *vlan, *tmp;
7546 struct hclge_dev *hdev = vport->back;
7549 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7550 if (!vlan->hd_tbl_status) {
7551 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7553 vlan->vlan_id, 0, false);
7555 dev_err(&hdev->pdev->dev,
7556 "restore vport vlan list failed, ret=%d\n",
7561 vlan->hd_tbl_status = true;
7567 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7570 struct hclge_vport_vlan_cfg *vlan, *tmp;
7571 struct hclge_dev *hdev = vport->back;
7573 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7574 if (vlan->vlan_id == vlan_id) {
7575 if (is_write_tbl && vlan->hd_tbl_status)
7576 hclge_set_vlan_filter_hw(hdev,
7582 list_del(&vlan->node);
7589 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7591 struct hclge_vport_vlan_cfg *vlan, *tmp;
7592 struct hclge_dev *hdev = vport->back;
7594 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7595 if (vlan->hd_tbl_status)
7596 hclge_set_vlan_filter_hw(hdev,
7602 vlan->hd_tbl_status = false;
7604 list_del(&vlan->node);
7610 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7612 struct hclge_vport_vlan_cfg *vlan, *tmp;
7613 struct hclge_vport *vport;
7616 mutex_lock(&hdev->vport_cfg_mutex);
7617 for (i = 0; i < hdev->num_alloc_vport; i++) {
7618 vport = &hdev->vport[i];
7619 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7620 list_del(&vlan->node);
7624 mutex_unlock(&hdev->vport_cfg_mutex);
7627 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7629 struct hclge_vport *vport = hclge_get_vport(handle);
7630 struct hclge_vport_vlan_cfg *vlan, *tmp;
7631 struct hclge_dev *hdev = vport->back;
7632 u16 vlan_proto, qos;
7636 mutex_lock(&hdev->vport_cfg_mutex);
7637 for (i = 0; i < hdev->num_alloc_vport; i++) {
7638 vport = &hdev->vport[i];
7639 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7640 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7641 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7642 state = vport->port_base_vlan_cfg.state;
7644 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7645 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7646 vport->vport_id, vlan_id, qos,
7651 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7652 if (vlan->hd_tbl_status)
7653 hclge_set_vlan_filter_hw(hdev,
7661 mutex_unlock(&hdev->vport_cfg_mutex);
7664 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7666 struct hclge_vport *vport = hclge_get_vport(handle);
7668 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7669 vport->rxvlan_cfg.strip_tag1_en = false;
7670 vport->rxvlan_cfg.strip_tag2_en = enable;
7672 vport->rxvlan_cfg.strip_tag1_en = enable;
7673 vport->rxvlan_cfg.strip_tag2_en = true;
7675 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7676 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7677 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7679 return hclge_set_vlan_rx_offload_cfg(vport);
7682 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7683 u16 port_base_vlan_state,
7684 struct hclge_vlan_info *new_info,
7685 struct hclge_vlan_info *old_info)
7687 struct hclge_dev *hdev = vport->back;
7690 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7691 hclge_rm_vport_all_vlan_table(vport, false);
7692 return hclge_set_vlan_filter_hw(hdev,
7693 htons(new_info->vlan_proto),
7696 new_info->qos, false);
7699 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7700 vport->vport_id, old_info->vlan_tag,
7701 old_info->qos, true);
7705 return hclge_add_vport_all_vlan_table(vport);
7708 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7709 struct hclge_vlan_info *vlan_info)
7711 struct hnae3_handle *nic = &vport->nic;
7712 struct hclge_vlan_info *old_vlan_info;
7713 struct hclge_dev *hdev = vport->back;
7716 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7718 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7722 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7723 /* add new VLAN tag */
7724 ret = hclge_set_vlan_filter_hw(hdev,
7725 htons(vlan_info->vlan_proto),
7727 vlan_info->vlan_tag,
7728 vlan_info->qos, false);
7732 /* remove old VLAN tag */
7733 ret = hclge_set_vlan_filter_hw(hdev,
7734 htons(old_vlan_info->vlan_proto),
7736 old_vlan_info->vlan_tag,
7737 old_vlan_info->qos, true);
7744 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7749 /* update the state only when disabling/enabling port based VLAN */
7750 vport->port_base_vlan_cfg.state = state;
7751 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7752 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7754 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7757 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7758 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7759 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7764 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7765 enum hnae3_port_base_vlan_state state,
7768 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7770 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7772 return HNAE3_PORT_BASE_VLAN_ENABLE;
7775 return HNAE3_PORT_BASE_VLAN_DISABLE;
7776 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7777 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7779 return HNAE3_PORT_BASE_VLAN_MODIFY;
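/*
 * Resulting state transitions (as far as the checks above show):
 * vlan 0 while disabled -> NOCHANGE; a non-zero vlan while disabled ->
 * ENABLE; vlan 0 while enabled -> DISABLE; the currently configured tag
 * again -> NOCHANGE; any other tag -> MODIFY.
 */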
7783 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7784 u16 vlan, u8 qos, __be16 proto)
7786 struct hclge_vport *vport = hclge_get_vport(handle);
7787 struct hclge_dev *hdev = vport->back;
7788 struct hclge_vlan_info vlan_info;
7792 if (hdev->pdev->revision == 0x20)
7795 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7796 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7798 if (proto != htons(ETH_P_8021Q))
7799 return -EPROTONOSUPPORT;
7801 vport = &hdev->vport[vfid];
7802 state = hclge_get_port_base_vlan_state(vport,
7803 vport->port_base_vlan_cfg.state,
7805 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7808 vlan_info.vlan_tag = vlan;
7809 vlan_info.qos = qos;
7810 vlan_info.vlan_proto = ntohs(proto);
7812 /* update port based VLAN for PF */
7814 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7815 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7816 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7821 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7822 return hclge_update_port_base_vlan_cfg(vport, state,
7825 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7833 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7834 u16 vlan_id, bool is_kill)
7836 struct hclge_vport *vport = hclge_get_vport(handle);
7837 struct hclge_dev *hdev = vport->back;
7838 bool writen_to_tbl = false;
7841 /* When the device is resetting, firmware is unable to handle the
7842 * mailbox. Just record the vlan id, and remove it after the
7843 * reset finishes.
7844 */
7845 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7846 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7850 /* When port based vlan is enabled, we use it as the vlan filter
7851 * entry. In this case, we don't update the vlan filter table when
7852 * the user adds or removes a vlan; we just update the vport vlan
7853 * list. The vlan ids in the list are only written to the vlan
7854 * filter table once port based vlan is disabled.
7855 */
7856 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7857 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7858 vlan_id, 0, is_kill);
7859 writen_to_tbl = true;
7864 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7866 hclge_add_vport_vlan_table(vport, vlan_id,
7868 } else if (is_kill) {
7869 /* When removing the hw vlan filter failed, record the vlan id,
7870 * and try to remove it from hw later, to stay consistent
7871 * with the stack.
7872 */
7873 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7878 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
7880 #define HCLGE_MAX_SYNC_COUNT 60
7882 int i, ret, sync_cnt = 0;
7885 /* walk every vport's list; the PF (vport 0) is always alive */
7886 for (i = 0; i < hdev->num_alloc_vport; i++) {
7887 struct hclge_vport *vport = &hdev->vport[i];
7889 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7891 while (vlan_id != VLAN_N_VID) {
7892 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7893 vport->vport_id, vlan_id,
7895 if (ret && ret != -EINVAL)
7898 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
7899 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7902 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
7905 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7911 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7913 struct hclge_config_max_frm_size_cmd *req;
7914 struct hclge_desc desc;
7916 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7918 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7919 req->max_frm_size = cpu_to_le16(new_mps);
7920 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7922 return hclge_cmd_send(&hdev->hw, &desc, 1);
7925 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7927 struct hclge_vport *vport = hclge_get_vport(handle);
7929 return hclge_set_vport_mtu(vport, new_mtu);
7932 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7934 struct hclge_dev *hdev = vport->back;
7935 int i, max_frm_size, ret;
7937 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
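/*
 * e.g. new_mtu = 1500 gives max_frm_size = 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 8 (two VLAN_HLEN tags) = 1526 bytes.
 */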
7938 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7939 max_frm_size > HCLGE_MAC_MAX_FRAME)
7942 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7943 mutex_lock(&hdev->vport_lock);
7944 /* VF's mps must fit within hdev->mps */
7945 if (vport->vport_id && max_frm_size > hdev->mps) {
7946 mutex_unlock(&hdev->vport_lock);
7948 } else if (vport->vport_id) {
7949 vport->mps = max_frm_size;
7950 mutex_unlock(&hdev->vport_lock);
7954 /* PF's mps must be no less than any VF's mps */
7955 for (i = 1; i < hdev->num_alloc_vport; i++)
7956 if (max_frm_size < hdev->vport[i].mps) {
7957 mutex_unlock(&hdev->vport_lock);
7961 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7963 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7965 dev_err(&hdev->pdev->dev,
7966 "Change mtu fail, ret =%d\n", ret);
7970 hdev->mps = max_frm_size;
7971 vport->mps = max_frm_size;
7973 ret = hclge_buffer_alloc(hdev);
7975 dev_err(&hdev->pdev->dev,
7976 "Allocate buffer fail, ret =%d\n", ret);
7979 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7980 mutex_unlock(&hdev->vport_lock);
7984 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7987 struct hclge_reset_tqp_queue_cmd *req;
7988 struct hclge_desc desc;
7991 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7993 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7994 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7996 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
7998 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8000 dev_err(&hdev->pdev->dev,
8001 "Send tqp reset cmd error, status =%d\n", ret);
8008 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8010 struct hclge_reset_tqp_queue_cmd *req;
8011 struct hclge_desc desc;
8014 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8016 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8017 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8019 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8021 dev_err(&hdev->pdev->dev,
8022 "Get reset status error, status =%d\n", ret);
8026 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8029 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8031 struct hnae3_queue *queue;
8032 struct hclge_tqp *tqp;
8034 queue = handle->kinfo.tqp[queue_id];
8035 tqp = container_of(queue, struct hclge_tqp, q);
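/*
 * TQP reset sequence used below: quiesce the queue, assert the per-queue
 * soft reset, poll the hw ready bit up to HCLGE_TQP_RESET_TRY_TIMES,
 * then deassert the reset.
 */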
8040 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8042 struct hclge_vport *vport = hclge_get_vport(handle);
8043 struct hclge_dev *hdev = vport->back;
8044 int reset_try_times = 0;
8049 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8051 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8053 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8057 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8059 dev_err(&hdev->pdev->dev,
8060 "Send reset tqp cmd fail, ret = %d\n", ret);
8064 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8065 /* Wait for tqp hw reset */
8067 reset_status = hclge_get_reset_status(hdev, queue_gid);
8072 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8073 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8077 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8079 dev_err(&hdev->pdev->dev,
8080 "Deassert the soft reset fail, ret = %d\n", ret);
8085 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8087 struct hclge_dev *hdev = vport->back;
8088 int reset_try_times = 0;
8093 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8095 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8097 dev_warn(&hdev->pdev->dev,
8098 "Send reset tqp cmd fail, ret = %d\n", ret);
8102 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8103 /* Wait for tqp hw reset */
8105 reset_status = hclge_get_reset_status(hdev, queue_gid);
8110 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8111 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8115 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8117 dev_warn(&hdev->pdev->dev,
8118 "Deassert the soft reset fail, ret = %d\n", ret);
8121 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8123 struct hclge_vport *vport = hclge_get_vport(handle);
8124 struct hclge_dev *hdev = vport->back;
8126 return hdev->fw_version;
8129 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8131 struct phy_device *phydev = hdev->hw.mac.phydev;
8136 phy_set_asym_pause(phydev, rx_en, tx_en);
8139 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8144 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8145 else if (rx_en && !tx_en)
8146 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8147 else if (!rx_en && tx_en)
8148 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8150 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8152 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8155 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8157 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8162 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8167 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8169 struct phy_device *phydev = hdev->hw.mac.phydev;
8170 u16 remote_advertising = 0;
8171 u16 local_advertising;
8172 u32 rx_pause, tx_pause;
8175 if (!phydev->link || !phydev->autoneg)
8178 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8181 remote_advertising = LPA_PAUSE_CAP;
8183 if (phydev->asym_pause)
8184 remote_advertising |= LPA_PAUSE_ASYM;
8186 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8187 remote_advertising);
8188 tx_pause = flowctl & FLOW_CTRL_TX;
8189 rx_pause = flowctl & FLOW_CTRL_RX;
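/*
 * e.g. if both ends advertise symmetric pause, mii_resolve_flowctrl_fdx()
 * returns FLOW_CTRL_TX | FLOW_CTRL_RX, enabling pause in both directions.
 */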
8191 if (phydev->duplex == HCLGE_MAC_HALF) {
8196 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8199 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8200 u32 *rx_en, u32 *tx_en)
8202 struct hclge_vport *vport = hclge_get_vport(handle);
8203 struct hclge_dev *hdev = vport->back;
8204 struct phy_device *phydev = hdev->hw.mac.phydev;
8206 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8208 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8214 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8217 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8220 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8229 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8230 u32 rx_en, u32 tx_en)
8232 struct hclge_vport *vport = hclge_get_vport(handle);
8233 struct hclge_dev *hdev = vport->back;
8234 struct phy_device *phydev = hdev->hw.mac.phydev;
8238 fc_autoneg = hclge_get_autoneg(handle);
8239 if (auto_neg != fc_autoneg) {
8240 dev_info(&hdev->pdev->dev,
8241 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8246 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8247 dev_info(&hdev->pdev->dev,
8248 "Priority flow control enabled. Cannot set link flow control.\n");
8252 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8255 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8258 return phy_start_aneg(phydev);
8263 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8264 u8 *auto_neg, u32 *speed, u8 *duplex)
8266 struct hclge_vport *vport = hclge_get_vport(handle);
8267 struct hclge_dev *hdev = vport->back;
8270 *speed = hdev->hw.mac.speed;
8272 *duplex = hdev->hw.mac.duplex;
8274 *auto_neg = hdev->hw.mac.autoneg;
8277 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8280 struct hclge_vport *vport = hclge_get_vport(handle);
8281 struct hclge_dev *hdev = vport->back;
8284 *media_type = hdev->hw.mac.media_type;
8287 *module_type = hdev->hw.mac.module_type;
8290 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8291 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8293 struct hclge_vport *vport = hclge_get_vport(handle);
8294 struct hclge_dev *hdev = vport->back;
8295 struct phy_device *phydev = hdev->hw.mac.phydev;
8296 int mdix_ctrl, mdix, is_resolved;
8297 unsigned int retval;
8300 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8301 *tp_mdix = ETH_TP_MDI_INVALID;
8305 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8307 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8308 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8309 HCLGE_PHY_MDIX_CTRL_S);
8311 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8312 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8313 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8315 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8317 switch (mdix_ctrl) {
8319 *tp_mdix_ctrl = ETH_TP_MDI;
8322 *tp_mdix_ctrl = ETH_TP_MDI_X;
8325 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8328 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8333 *tp_mdix = ETH_TP_MDI_INVALID;
8335 *tp_mdix = ETH_TP_MDI_X;
8337 *tp_mdix = ETH_TP_MDI;
8340 static void hclge_info_show(struct hclge_dev *hdev)
8342 struct device *dev = &hdev->pdev->dev;
8344 dev_info(dev, "PF info begin:\n");
8346 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8347 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8348 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8349 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8350 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8351 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8352 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8353 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8354 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8355 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8356 dev_info(dev, "This is %s PF\n",
8357 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8358 dev_info(dev, "DCB %s\n",
8359 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8360 dev_info(dev, "MQPRIO %s\n",
8361 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8363 dev_info(dev, "PF info end.\n");
8366 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8367 struct hclge_vport *vport)
8369 struct hnae3_client *client = vport->nic.client;
8370 struct hclge_dev *hdev = ae_dev->priv;
8374 rst_cnt = hdev->rst_stats.reset_cnt;
8375 ret = client->ops->init_instance(&vport->nic);
8379 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8380 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8381 rst_cnt != hdev->rst_stats.reset_cnt) {
8386 /* Enable nic hw error interrupts */
8387 ret = hclge_config_nic_hw_error(hdev, true);
8389 dev_err(&ae_dev->pdev->dev,
8390 "fail(%d) to enable hw error interrupts\n", ret);
8394 hnae3_set_client_init_flag(client, ae_dev, 1);
8396 if (netif_msg_drv(&hdev->vport->nic))
8397 hclge_info_show(hdev);
8402 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8403 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8404 msleep(HCLGE_WAIT_RESET_DONE);
8406 client->ops->uninit_instance(&vport->nic, 0);
8411 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8412 struct hclge_vport *vport)
8414 struct hnae3_client *client = vport->roce.client;
8415 struct hclge_dev *hdev = ae_dev->priv;
8419 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8423 client = hdev->roce_client;
8424 ret = hclge_init_roce_base_info(vport);
8428 rst_cnt = hdev->rst_stats.reset_cnt;
8429 ret = client->ops->init_instance(&vport->roce);
8433 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8434 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8435 rst_cnt != hdev->rst_stats.reset_cnt) {
8440 /* Enable roce ras interrupts */
8441 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8443 dev_err(&ae_dev->pdev->dev,
8444 "fail(%d) to enable roce ras interrupts\n", ret);
8448 hnae3_set_client_init_flag(client, ae_dev, 1);
8453 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8454 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8455 msleep(HCLGE_WAIT_RESET_DONE);
8457 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8462 static int hclge_init_client_instance(struct hnae3_client *client,
8463 struct hnae3_ae_dev *ae_dev)
8465 struct hclge_dev *hdev = ae_dev->priv;
8466 struct hclge_vport *vport;
8469 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8470 vport = &hdev->vport[i];
8472 switch (client->type) {
8473 case HNAE3_CLIENT_KNIC:
8475 hdev->nic_client = client;
8476 vport->nic.client = client;
8477 ret = hclge_init_nic_client_instance(ae_dev, vport);
8481 ret = hclge_init_roce_client_instance(ae_dev, vport);
8486 case HNAE3_CLIENT_ROCE:
8487 if (hnae3_dev_roce_supported(hdev)) {
8488 hdev->roce_client = client;
8489 vport->roce.client = client;
8492 ret = hclge_init_roce_client_instance(ae_dev, vport);
8505 hdev->nic_client = NULL;
8506 vport->nic.client = NULL;
8509 hdev->roce_client = NULL;
8510 vport->roce.client = NULL;
8514 static void hclge_uninit_client_instance(struct hnae3_client *client,
8515 struct hnae3_ae_dev *ae_dev)
8517 struct hclge_dev *hdev = ae_dev->priv;
8518 struct hclge_vport *vport;
8521 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8522 vport = &hdev->vport[i];
8523 if (hdev->roce_client) {
8524 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8525 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8526 msleep(HCLGE_WAIT_RESET_DONE);
8528 hdev->roce_client->ops->uninit_instance(&vport->roce,
8530 hdev->roce_client = NULL;
8531 vport->roce.client = NULL;
8533 if (client->type == HNAE3_CLIENT_ROCE)
8535 if (hdev->nic_client && client->ops->uninit_instance) {
8536 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8537 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8538 msleep(HCLGE_WAIT_RESET_DONE);
8540 client->ops->uninit_instance(&vport->nic, 0);
8541 hdev->nic_client = NULL;
8542 vport->nic.client = NULL;
8547 static int hclge_pci_init(struct hclge_dev *hdev)
8549 struct pci_dev *pdev = hdev->pdev;
8550 struct hclge_hw *hw;
8553 ret = pci_enable_device(pdev);
8555 dev_err(&pdev->dev, "failed to enable PCI device\n");
8559 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8561 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8564 "can't set consistent PCI DMA");
8565 goto err_disable_device;
8567 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8570 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8572 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8573 goto err_disable_device;
8576 pci_set_master(pdev);
8578 hw->io_base = pcim_iomap(pdev, 2, 0);
8580 dev_err(&pdev->dev, "Can't map configuration register space\n");
8582 goto err_clr_master;
8585 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8589 pci_clear_master(pdev);
8590 pci_release_regions(pdev);
8592 pci_disable_device(pdev);
8597 static void hclge_pci_uninit(struct hclge_dev *hdev)
8599 struct pci_dev *pdev = hdev->pdev;
8601 pcim_iounmap(pdev, hdev->hw.io_base);
8602 pci_free_irq_vectors(pdev);
8603 pci_clear_master(pdev);
8604 pci_release_mem_regions(pdev);
8605 pci_disable_device(pdev);
8608 static void hclge_state_init(struct hclge_dev *hdev)
8610 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8611 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8612 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8613 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8614 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8615 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8618 static void hclge_state_uninit(struct hclge_dev *hdev)
8620 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8621 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8623 if (hdev->reset_timer.function)
8624 del_timer_sync(&hdev->reset_timer);
8625 if (hdev->service_task.work.func)
8626 cancel_delayed_work_sync(&hdev->service_task);
8627 if (hdev->rst_service_task.func)
8628 cancel_work_sync(&hdev->rst_service_task);
8629 if (hdev->mbx_service_task.func)
8630 cancel_work_sync(&hdev->mbx_service_task);
8633 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8635 #define HCLGE_FLR_WAIT_MS 100
8636 #define HCLGE_FLR_WAIT_CNT 50
8637 struct hclge_dev *hdev = ae_dev->priv;
8640 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8641 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8642 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8643 hclge_reset_event(hdev->pdev, NULL);
8645 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8646 cnt++ < HCLGE_FLR_WAIT_CNT)
8647 msleep(HCLGE_FLR_WAIT_MS);
8649 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8650 dev_err(&hdev->pdev->dev,
8651 "flr wait down timeout: %d\n", cnt);
8654 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8656 struct hclge_dev *hdev = ae_dev->priv;
8658 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8661 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8665 for (i = 0; i < hdev->num_alloc_vport; i++) {
8666 struct hclge_vport *vport = &hdev->vport[i];
8669 /* Send cmd to clear VF's FUNC_RST_ING */
8670 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8672 dev_warn(&hdev->pdev->dev,
8673 "clear vf(%d) rst failed %d!\n",
8674 vport->vport_id, ret);
8678 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8680 struct pci_dev *pdev = ae_dev->pdev;
8681 struct hclge_dev *hdev;
8684 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8691 hdev->ae_dev = ae_dev;
8692 hdev->reset_type = HNAE3_NONE_RESET;
8693 hdev->reset_level = HNAE3_FUNC_RESET;
8694 ae_dev->priv = hdev;
8695 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8697 mutex_init(&hdev->vport_lock);
8698 mutex_init(&hdev->vport_cfg_mutex);
8699 spin_lock_init(&hdev->fd_rule_lock);
8701 ret = hclge_pci_init(hdev);
8703 dev_err(&pdev->dev, "PCI init failed\n");
8707 /* Initialize the firmware command queue */
8708 ret = hclge_cmd_queue_init(hdev);
8710 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8711 goto err_pci_uninit;
8714 /* Initialize firmware command handling */
8715 ret = hclge_cmd_init(hdev);
8717 goto err_cmd_uninit;
8719 ret = hclge_get_cap(hdev);
8721 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8723 goto err_cmd_uninit;
8726 ret = hclge_configure(hdev);
8728 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8729 goto err_cmd_uninit;
8732 ret = hclge_init_msi(hdev);
8734 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8735 goto err_cmd_uninit;
8738 ret = hclge_misc_irq_init(hdev);
8741 "Misc IRQ(vector0) init error, ret = %d.\n",
8743 goto err_msi_uninit;
8746 ret = hclge_alloc_tqps(hdev);
8748 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8749 goto err_msi_irq_uninit;
8752 ret = hclge_alloc_vport(hdev);
8754 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8755 goto err_msi_irq_uninit;
8758 ret = hclge_map_tqp(hdev);
8760 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8761 goto err_msi_irq_uninit;
8764 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8765 ret = hclge_mac_mdio_config(hdev);
8767 dev_err(&hdev->pdev->dev,
8768 "mdio config fail ret=%d\n", ret);
8769 goto err_msi_irq_uninit;
8773 ret = hclge_init_umv_space(hdev);
8775 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8776 goto err_mdiobus_unreg;
8779 ret = hclge_mac_init(hdev);
8781 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8782 goto err_mdiobus_unreg;
8785 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8787 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8788 goto err_mdiobus_unreg;
8791 ret = hclge_config_gro(hdev, true);
8793 goto err_mdiobus_unreg;
8795 ret = hclge_init_vlan_config(hdev);
8797 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8798 goto err_mdiobus_unreg;
8801 ret = hclge_tm_schd_init(hdev);
8803 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8804 goto err_mdiobus_unreg;
8807 hclge_rss_init_cfg(hdev);
8808 ret = hclge_rss_init_hw(hdev);
8810 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8811 goto err_mdiobus_unreg;
8814 ret = init_mgr_tbl(hdev);
8816 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8817 goto err_mdiobus_unreg;
8820 ret = hclge_init_fd_config(hdev);
8823 "fd table init fail, ret=%d\n", ret);
8824 goto err_mdiobus_unreg;
8827 INIT_KFIFO(hdev->mac_tnl_log);
8829 hclge_dcb_ops_set(hdev);
8831 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8832 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
8833 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8834 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8836 /* Set up affinity after the service timer setup, because add_timer_on
8837 * is called in the affinity notify callback.
8838 */
8839 hclge_misc_affinity_setup(hdev);
8841 hclge_clear_all_event_cause(hdev);
8842 hclge_clear_resetting_state(hdev);
8844 /* Log and clear the hw errors that have already occurred */
8845 hclge_handle_all_hns_hw_errors(ae_dev);
8847 /* request a delayed reset for error recovery, since an immediate global
8848 * reset on one PF would disturb the pending initialization of other PFs.
8849 */
8850 if (ae_dev->hw_err_reset_req) {
8851 enum hnae3_reset_type reset_level;
8853 reset_level = hclge_get_reset_level(ae_dev,
8854 &ae_dev->hw_err_reset_req);
8855 hclge_set_def_reset_request(ae_dev, reset_level);
8856 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8859 /* Enable MISC vector(vector0) */
8860 hclge_enable_vector(&hdev->misc_vector, true);
8862 hclge_state_init(hdev);
8863 hdev->last_reset_time = jiffies;
8865 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8869 if (hdev->hw.mac.phydev)
8870 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8872 hclge_misc_irq_uninit(hdev);
8874 pci_free_irq_vectors(pdev);
8876 hclge_cmd_uninit(hdev);
8878 pcim_iounmap(pdev, hdev->hw.io_base);
8879 pci_clear_master(pdev);
8880 pci_release_regions(pdev);
8881 pci_disable_device(pdev);
8886 static void hclge_stats_clear(struct hclge_dev *hdev)
8888 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8891 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8893 struct hclge_vport *vport = hdev->vport;
8896 for (i = 0; i < hdev->num_alloc_vport; i++) {
8897 hclge_vport_stop(vport);
8902 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8904 struct hclge_dev *hdev = ae_dev->priv;
8905 struct pci_dev *pdev = ae_dev->pdev;
8908 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8910 hclge_stats_clear(hdev);
8911 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8912 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8914 ret = hclge_cmd_init(hdev);
8916 dev_err(&pdev->dev, "Cmd queue init failed\n");
8920 ret = hclge_map_tqp(hdev);
8922 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8926 hclge_reset_umv_space(hdev);
8928 ret = hclge_mac_init(hdev);
8930 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8934 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8936 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8940 ret = hclge_config_gro(hdev, true);
8944 ret = hclge_init_vlan_config(hdev);
8946 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8950 ret = hclge_tm_init_hw(hdev, true);
8952 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8956 ret = hclge_rss_init_hw(hdev);
8958 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8962 ret = hclge_init_fd_config(hdev);
8964 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8968 /* Re-enable the hw error interrupts, because
8969 * they get disabled on a global reset.
8970 */
8971 ret = hclge_config_nic_hw_error(hdev, true);
8974 "fail(%d) to re-enable NIC hw error interrupts\n",
8979 if (hdev->roce_client) {
8980 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8983 "fail(%d) to re-enable roce ras interrupts\n",
8989 hclge_reset_vport_state(hdev);
8991 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8997 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8999 struct hclge_dev *hdev = ae_dev->priv;
9000 struct hclge_mac *mac = &hdev->hw.mac;
9002 hclge_misc_affinity_teardown(hdev);
9003 hclge_state_uninit(hdev);
9006 mdiobus_unregister(mac->mdio_bus);
9008 hclge_uninit_umv_space(hdev);
9010 /* Disable MISC vector(vector0) */
9011 hclge_enable_vector(&hdev->misc_vector, false);
9012 synchronize_irq(hdev->misc_vector.vector_irq);
9014 /* Disable all hw interrupts */
9015 hclge_config_mac_tnl_int(hdev, false);
9016 hclge_config_nic_hw_error(hdev, false);
9017 hclge_config_rocee_ras_interrupt(hdev, false);
9019 hclge_cmd_uninit(hdev);
9020 hclge_misc_irq_uninit(hdev);
9021 hclge_pci_uninit(hdev);
9022 mutex_destroy(&hdev->vport_lock);
9023 hclge_uninit_vport_mac_table(hdev);
9024 hclge_uninit_vport_vlan_table(hdev);
9025 mutex_destroy(&hdev->vport_cfg_mutex);
9026 ae_dev->priv = NULL;
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
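/* ethtool register dump: four blocks are read directly from the PF BAR
 * (cmdq, common, per-ring and per-vector interrupt registers), each padded
 * with SEPARATOR_VALUE to whole REG_NUM_PER_LINE-word lines, followed by
 * 32-bit and 64-bit register arrays queried from firmware.
 */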
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
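/* In the query replies below, the first descriptor still carries the command
 * header, so it yields fewer register values than the later descriptors,
 * which are consumed as raw word arrays; HCLGE_*_DESC_NODATA_LEN accounts
 * for those header words.
 */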
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num, cmd_num;
	int i, k, n, ret;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len, cmd_num;
	int i, k, n, ret;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
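/* Each register block is rounded up to whole REG_LEN_PER_LINE lines; the
 * "+ 1" per block reserves room for the SEPARATOR_VALUE padding that
 * hclge_get_regs() appends after the block's registers.
 */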
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
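/* supported/advertising below are ethtool link-mode bitmaps of
 * __ETHTOOL_LINK_MODE_MASK_NBITS bits, copied from the cached MAC state.
 */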
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};
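/* The ops above are exported to the hnae3 framework through ae_algo below;
 * hnae3 binds them to devices matching ae_algo_pci_tbl once the algorithm
 * is registered in hclge_init().
 */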
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);