// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

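/* Usage note: HCLGE_STATS_READ() dereferences a u64 counter at a byte
 * offset inside a stats struct, e.g.
 *	HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 * which is how hclge_comm_get_stats() below walks g_mac_stats_string[].
 */
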
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

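/* MODULE_DEVICE_TABLE() exports ae_algo_pci_tbl to userspace so module
 * autoloading (modprobe driven by PCI uevents) can bind this driver to
 * the device IDs listed above.
 */
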
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_EN_0_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

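/* Note: the order of hns3_nic_test_strs[] must match enum hnae3_loop
 * (HNAE3_LOOP_APP, HNAE3_LOOP_SERIAL_SERDES, HNAE3_LOOP_PARALLEL_SERDES,
 * HNAE3_LOOP_PHY), since hclge_get_strings() indexes it with those values.
 */
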
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

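/* Worked example for the desc_num computation above: the first desc's
 * 24-byte data area carries three u64 stats next to the command header,
 * while each following desc is reused whole (32 bytes, four u64s), so
 * desc_num = 1 + ceil((reg_num - 3) / 4); e.g. reg_num = 21 gives
 * 1 + 4 + 1 = 6 descriptors.
 */
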
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

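/* Note the += above: the per-queue counters are accumulated into software
 * totals rather than overwritten, presumably so the reported totals survive
 * hardware counter resets (e.g. across a function reset).
 */
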
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

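/* This mapping is the inverse of the switch in hclge_cfg_mac_speed_dup_hw()
 * below, which encodes 1G..100G as firmware speed codes 0..5 and 10M/100M
 * as 6/7.
 */
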
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

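/* Summary of the FEC abilities set above: 10G/40G links offer BaseR
 * (Fire Code) only, 25G/50G can run either BaseR or RS, and 100G is RS
 * only; HNAE3_FEC_AUTO additionally leaves the mode choice to firmware.
 */
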
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

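	/* Assemble the 48-bit MAC address: the low 32 bits come from
	 * param[2], the high 16 bits from param[3]. The shift is split
	 * into (<< 31) << 1 so the expression stays well-defined even
	 * for a 32-bit-wide operand type.
	 */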
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len must be sent to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int i, ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently not support uncontiguous tc */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

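/* Example of the rss_size derivation above (illustrative numbers): with
 * rss_size_max = 16, alloc_tqps = 8 and num_tc = 4, each TC gets
 * 8 / 4 = 2 queues, so kinfo->rss_size = min(16, 2) = 2.
 */
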
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

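	/* Example: with num_tqps = 18 and num_vport = 4, tqp_per_vport is
	 * 18 / 4 = 4 and the main (PF) vport absorbs the remainder:
	 * tqp_main_vport = 4 + 18 % 4 = 6.
	 */
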
	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

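/* hclge_buffer_alloc() drives the whole packet-buffer setup sequence:
 * TX buffer calculation and allocation first, then RX private buffer
 * calculation and allocation, and, on DCB-capable devices, the per-TC RX
 * waterlines and common thresholds, finishing with the shared-buffer
 * waterline.
 */
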
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M support half duplex */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

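	/* Firmware speed encoding used below (the inverse of
	 * hclge_parse_speed()): 1G=0, 10G=1, 25G=2, 40G=3, 50G=4,
	 * 100G=5, 10M=6, 100M=7.
	 */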
	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2235 duplex = hclge_check_speed_dup(duplex, speed);
2236 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2239 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2243 hdev->hw.mac.speed = speed;
2244 hdev->hw.mac.duplex = duplex;
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2252 struct hclge_vport *vport = hclge_get_vport(handle);
2253 struct hclge_dev *hdev = vport->back;
2255 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2260 struct hclge_config_auto_neg_cmd *req;
2261 struct hclge_desc desc;
2265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2267 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2281 struct hclge_vport *vport = hclge_get_vport(handle);
2282 struct hclge_dev *hdev = vport->back;
2284 if (!hdev->hw.mac.support_autoneg) {
2286 dev_err(&hdev->pdev->dev,
2287 "autoneg is not supported by current port\n");
2294 return hclge_set_autoneg_en(hdev, enable);
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2299 struct hclge_vport *vport = hclge_get_vport(handle);
2300 struct hclge_dev *hdev = vport->back;
2301 struct phy_device *phydev = hdev->hw.mac.phydev;
2304 return phydev->autoneg;
2306 return hdev->hw.mac.autoneg;
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2311 struct hclge_vport *vport = hclge_get_vport(handle);
2312 struct hclge_dev *hdev = vport->back;
2315 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2320 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2325 struct hclge_config_fec_cmd *req;
2326 struct hclge_desc desc;
2329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2331 req = (struct hclge_config_fec_cmd *)desc.data;
2332 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334 if (fec_mode & BIT(HNAE3_FEC_RS))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337 if (fec_mode & BIT(HNAE3_FEC_BASER))
2338 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2350 struct hclge_vport *vport = hclge_get_vport(handle);
2351 struct hclge_dev *hdev = vport->back;
2352 struct hclge_mac *mac = &hdev->hw.mac;
2355 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2360 ret = hclge_set_fec_hw(hdev, fec_mode);
2364 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2371 struct hclge_vport *vport = hclge_get_vport(handle);
2372 struct hclge_dev *hdev = vport->back;
2373 struct hclge_mac *mac = &hdev->hw.mac;
2376 *fec_ability = mac->fec_ability;
2378 *fec_mode = mac->fec_mode;
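/* MAC bring-up below runs in a fixed order: speed/duplex first, then
 * the user-requested FEC mode (only if HNAE3_FEC_USER_DEF is set),
 * then MTU, and finally the packet buffer allocation; any failure
 * aborts the init.
 */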
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2383 struct hclge_mac *mac = &hdev->hw.mac;
2386 hdev->support_sfp_query = true;
2387 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389 hdev->hw.mac.duplex);
2391 dev_err(&hdev->pdev->dev,
2392 "Config mac speed dup fail ret=%d\n", ret);
2398 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2401 dev_err(&hdev->pdev->dev,
2402 "Fec mode init fail, ret = %d\n", ret);
2407 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2409 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2413 ret = hclge_buffer_alloc(hdev);
2415 dev_err(&hdev->pdev->dev,
2416 "allocate buffer fail, ret=%d\n", ret);
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425 schedule_work(&hdev->mbx_service_task);
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2430 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431 schedule_work(&hdev->rst_service_task);
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2436 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439 (void)schedule_work(&hdev->service_task);
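/* The three schedulers above share the same pattern: test_and_set_bit()
 * on a *_SCHED flag makes the schedule request idempotent, so a task is
 * never queued twice while a previous request is still pending.
 */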
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2444 struct hclge_link_status_cmd *req;
2445 struct hclge_desc desc;
2449 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2452 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2457 req = (struct hclge_link_status_cmd *)desc.data;
2458 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2460 return !!link_status;
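/* When a PHY is attached, the link state reported below is the logical
 * AND of the MAC link and the PHY link (and only while the PHY is in
 * PHY_RUNNING); otherwise the MAC link status is used directly.
 */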
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2468 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2471 mac_state = hclge_get_mac_link_status(hdev);
2473 if (hdev->hw.mac.phydev) {
2474 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475 link_stat = mac_state &
2476 hdev->hw.mac.phydev->link;
2481 link_stat = mac_state;
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2489 struct hnae3_client *rclient = hdev->roce_client;
2490 struct hnae3_client *client = hdev->nic_client;
2491 struct hnae3_handle *rhandle;
2492 struct hnae3_handle *handle;
2498 state = hclge_get_mac_phy_link(hdev);
2499 if (state != hdev->hw.mac.link) {
2500 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501 handle = &hdev->vport[i].nic;
2502 client->ops->link_status_change(handle, state);
2503 hclge_config_mac_tnl_int(hdev, state);
2504 rhandle = &hdev->vport[i].roce;
2505 if (rclient && rclient->ops->link_status_change)
2506 rclient->ops->link_status_change(rhandle,
2509 hdev->hw.mac.link = state;
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2515 /* update fec ability by speed */
2516 hclge_convert_setting_fec(mac);
2518 /* firmware cannot identify the backplane type; the media type
2519 * read from the configuration helps to handle it
2521 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525 mac->module_type = HNAE3_MODULE_TYPE_TP;
2527 if (mac->support_autoneg) {
2528 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529 linkmode_copy(mac->advertising, mac->supported);
2531 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2533 linkmode_zero(mac->advertising);
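/* Two SFP query paths exist: on revision >= 0x21 hclge_get_sfp_info()
 * returns full port information (speed, module type, autoneg and FEC),
 * while older revisions only query the speed. If the firmware answers
 * -EOPNOTSUPP once, support_sfp_query is cleared and no further SFP
 * queries are issued (see hclge_update_port_info()).
 */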
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2539 struct hclge_sfp_info_cmd *resp = NULL;
2540 struct hclge_desc desc;
2543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544 resp = (struct hclge_sfp_info_cmd *)desc.data;
2545 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546 if (ret == -EOPNOTSUPP) {
2547 dev_warn(&hdev->pdev->dev,
2548 "IMP do not support get SFP speed %d\n", ret);
2551 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2555 *speed = le32_to_cpu(resp->speed);
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2562 struct hclge_sfp_info_cmd *resp;
2563 struct hclge_desc desc;
2566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567 resp = (struct hclge_sfp_info_cmd *)desc.data;
2569 resp->query_type = QUERY_ACTIVE_SPEED;
2571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572 if (ret == -EOPNOTSUPP) {
2573 dev_warn(&hdev->pdev->dev,
2574 "IMP does not support get SFP info %d\n", ret);
2577 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2581 mac->speed = le32_to_cpu(resp->speed);
2582 /* if resp->speed_ability is 0, it means the firmware is an old
2583 * version, so do not update these params
2585 if (resp->speed_ability) {
2586 mac->module_type = le32_to_cpu(resp->module_type);
2587 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588 mac->autoneg = resp->autoneg;
2589 mac->support_autoneg = resp->autoneg_ability;
2590 if (!resp->active_fec)
2593 mac->fec_mode = BIT(resp->active_fec);
2595 mac->speed_type = QUERY_SFP_SPEED;
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2603 struct hclge_mac *mac = &hdev->hw.mac;
2604 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2607 /* get the port info from SFP cmd if not copper port */
2608 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2611 /* if IMP does not support get SFP/qSFP info, return directly */
2612 if (!hdev->support_sfp_query)
2615 if (hdev->pdev->revision >= 0x21)
2616 ret = hclge_get_sfp_info(hdev, mac);
2618 ret = hclge_get_sfp_speed(hdev, &speed);
2620 if (ret == -EOPNOTSUPP) {
2621 hdev->support_sfp_query = false;
2627 if (hdev->pdev->revision >= 0x21) {
2628 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629 hclge_update_port_capability(mac);
2632 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2635 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636 return 0; /* do nothing if no SFP */
2638 /* must configure full duplex for SFP */
2639 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2643 static int hclge_get_status(struct hnae3_handle *handle)
2645 struct hclge_vport *vport = hclge_get_vport(handle);
2646 struct hclge_dev *hdev = vport->back;
2648 hclge_update_link_status(hdev);
2650 return hdev->hw.mac.link;
2653 static void hclge_service_timer(struct timer_list *t)
2655 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2657 mod_timer(&hdev->service_timer, jiffies + HZ);
2658 hdev->hw_stats.stats_timer++;
2659 hdev->fd_arfs_expire_timer++;
2660 hclge_task_schedule(hdev);
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2665 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2667 /* Flush memory before next watchdog */
2668 smp_mb__before_atomic();
2669 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2674 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2676 /* fetch the events from their corresponding regs */
2677 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679 msix_src_reg = hclge_read_dev(&hdev->hw,
2680 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2682 /* Assumption: if by any chance reset and mailbox events are reported
2683 * together, then we will only process the reset event in this pass and
2684 * will defer the processing of the mailbox events. Since we will not
2685 * have cleared the RX CMDQ event this time, we will receive another
2686 * interrupt from the hardware just for the mailbox.
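/* Events are checked strictly in priority order: IMP reset, global
 * reset, core reset, MSI-X hardware error, then mailbox; the first
 * match wins and anything lower is left pending for the next pass.
 */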
2689 /* check for vector0 reset event sources */
2690 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695 hdev->rst_stats.imp_rst_cnt++;
2696 return HCLGE_VECTOR0_EVENT_RST;
2699 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704 hdev->rst_stats.global_rst_cnt++;
2705 return HCLGE_VECTOR0_EVENT_RST;
2708 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713 hdev->rst_stats.core_rst_cnt++;
2714 return HCLGE_VECTOR0_EVENT_RST;
2717 /* check for vector0 msix event source */
2718 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2721 return HCLGE_VECTOR0_EVENT_ERR;
2724 /* check for vector0 mailbox(=CMDQ RX) event source */
2725 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727 *clearval = cmdq_src_reg;
2728 return HCLGE_VECTOR0_EVENT_MBX;
2731 /* print other vector0 event source */
2732 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733 cmdq_src_reg, msix_src_reg);
2734 return HCLGE_VECTOR0_EVENT_OTHER;
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2740 switch (event_type) {
2741 case HCLGE_VECTOR0_EVENT_RST:
2742 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2744 case HCLGE_VECTOR0_EVENT_MBX:
2745 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2754 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2763 writel(enable ? 1 : 0, vector->addr);
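/* The misc IRQ handler below masks vector 0 on entry and only
 * re-enables it immediately for mailbox events; for reset events it
 * stays masked until hclge_clear_reset_cause(), and for deferred MSI-X
 * errors until hclge_get_reset_level() has handled the cause.
 */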
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2768 struct hclge_dev *hdev = data;
2772 hclge_enable_vector(&hdev->misc_vector, false);
2773 event_cause = hclge_check_event_cause(hdev, &clearval);
2775 /* vector 0 interrupt is shared with reset and mailbox source events. */
2776 switch (event_cause) {
2777 case HCLGE_VECTOR0_EVENT_ERR:
2778 /* we do not know what type of reset is required now. This could
2779 * only be decided after we fetch the type of errors which
2780 * caused this event. Therefore, we will do the following for now:
2781 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782 * have deferred the type of reset to be used.
2783 * 2. Schedule the reset service task.
2784 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2785 * will fetch the correct type of reset. This would be done
2786 * by first decoding the types of errors.
2788 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2790 case HCLGE_VECTOR0_EVENT_RST:
2791 hclge_reset_task_schedule(hdev);
2793 case HCLGE_VECTOR0_EVENT_MBX:
2794 /* If we are here then,
2795 * 1. Either we are not handling any mbx task and we are not
2796 * scheduled as well, or
2798 * 2. We could be handling a mbx task but nothing more is
2799 * scheduled.
2800 * In both cases, we should schedule the mbx task as there are more
2801 * mbx messages reported by this interrupt.
2803 hclge_mbx_task_schedule(hdev);
2806 dev_warn(&hdev->pdev->dev,
2807 "received unknown or unhandled event of vector0\n");
2811 /* clear the source of interrupt if it is not caused by reset */
2812 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813 hclge_clear_event_cause(hdev, event_cause, clearval);
2814 hclge_enable_vector(&hdev->misc_vector, true);
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2822 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823 dev_warn(&hdev->pdev->dev,
2824 "vector(vector_id %d) has been freed.\n", vector_id);
2828 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829 hdev->num_msi_left += 1;
2830 hdev->num_msi_used -= 1;
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2835 struct hclge_misc_vector *vector = &hdev->misc_vector;
2837 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2839 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840 hdev->vector_status[0] = 0;
2842 hdev->num_msi_left -= 1;
2843 hdev->num_msi_used += 1;
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2850 hclge_get_misc_vector(hdev);
2852 /* this would be explicitly freed in the end */
2853 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854 0, "hclge_misc", hdev);
2856 hclge_free_vector(hdev, 0);
2857 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858 hdev->misc_vector.vector_irq);
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2866 free_irq(hdev->misc_vector.vector_irq, hdev);
2867 hclge_free_vector(hdev, 0);
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871 enum hnae3_reset_notify_type type)
2873 struct hnae3_client *client = hdev->nic_client;
2876 if (!client->ops->reset_notify)
2879 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2880 struct hnae3_handle *handle = &hdev->vport[i].nic;
2883 ret = client->ops->reset_notify(handle, type);
2885 dev_err(&hdev->pdev->dev,
2886 "notify nic client failed %d(%d)\n", type, ret);
2894 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2895 enum hnae3_reset_notify_type type)
2897 struct hnae3_client *client = hdev->roce_client;
2904 if (!client->ops->reset_notify)
2907 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2908 struct hnae3_handle *handle = &hdev->vport[i].roce;
2910 ret = client->ops->reset_notify(handle, type);
2912 dev_err(&hdev->pdev->dev,
2913 "notify roce client failed %d(%d)",
2922 static int hclge_reset_wait(struct hclge_dev *hdev)
2924 #define HCLGE_RESET_WAIT_MS 100
2925 #define HCLGE_RESET_WAIT_CNT 200
2926 u32 val, reg, reg_bit;
2929 switch (hdev->reset_type) {
2930 case HNAE3_IMP_RESET:
2931 reg = HCLGE_GLOBAL_RESET_REG;
2932 reg_bit = HCLGE_IMP_RESET_BIT;
2934 case HNAE3_GLOBAL_RESET:
2935 reg = HCLGE_GLOBAL_RESET_REG;
2936 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2938 case HNAE3_CORE_RESET:
2939 reg = HCLGE_GLOBAL_RESET_REG;
2940 reg_bit = HCLGE_CORE_RESET_BIT;
2942 case HNAE3_FUNC_RESET:
2943 reg = HCLGE_FUN_RST_ING;
2944 reg_bit = HCLGE_FUN_RST_ING_B;
2946 case HNAE3_FLR_RESET:
2949 dev_err(&hdev->pdev->dev,
2950 "Wait for unsupported reset type: %d\n",
2955 if (hdev->reset_type == HNAE3_FLR_RESET) {
2956 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2957 cnt++ < HCLGE_RESET_WAIT_CNT)
2958 msleep(HCLGE_RESET_WAIT_MS);
2960 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2961 dev_err(&hdev->pdev->dev,
2962 "flr wait timeout: %d\n", cnt);
2969 val = hclge_read_dev(&hdev->hw, reg);
2970 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2971 msleep(HCLGE_RESET_WAIT_MS);
2972 val = hclge_read_dev(&hdev->hw, reg);
2976 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2977 dev_warn(&hdev->pdev->dev,
2978 "Wait for reset timeout: %d\n", hdev->reset_type);
2985 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2987 struct hclge_vf_rst_cmd *req;
2988 struct hclge_desc desc;
2990 req = (struct hclge_vf_rst_cmd *)desc.data;
2991 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2992 req->dest_vfid = func_id;
2997 return hclge_cmd_send(&hdev->hw, &desc, 1);
3000 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3004 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3005 struct hclge_vport *vport = &hdev->vport[i];
3008 /* Send cmd to set/clear VF's FUNC_RST_ING */
3009 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3011 dev_err(&hdev->pdev->dev,
3012 "set vf(%d) rst failed %d!\n",
3013 vport->vport_id, ret);
3017 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3020 /* Inform VF to process the reset.
3021 * hclge_inform_reset_assert_to_vf may fail if VF
3022 * driver is not loaded.
3024 ret = hclge_inform_reset_assert_to_vf(vport);
3026 dev_warn(&hdev->pdev->dev,
3027 "inform reset to vf(%d) failed %d!\n",
3028 vport->vport_id, ret);
3034 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3036 struct hclge_desc desc;
3037 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3040 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3041 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3042 req->fun_reset_vfid = func_id;
3044 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3046 dev_err(&hdev->pdev->dev,
3047 "send function reset cmd fail, status =%d\n", ret);
3052 static void hclge_do_reset(struct hclge_dev *hdev)
3054 struct hnae3_handle *handle = &hdev->vport[0].nic;
3055 struct pci_dev *pdev = hdev->pdev;
3058 if (hclge_get_hw_reset_stat(handle)) {
3059 dev_info(&pdev->dev, "Hardware reset not finish\n");
3060 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3061 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3062 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3066 switch (hdev->reset_type) {
3067 case HNAE3_GLOBAL_RESET:
3068 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3069 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3070 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3071 dev_info(&pdev->dev, "Global Reset requested\n");
3073 case HNAE3_CORE_RESET:
3074 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3075 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3076 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3077 dev_info(&pdev->dev, "Core Reset requested\n");
3079 case HNAE3_FUNC_RESET:
3080 dev_info(&pdev->dev, "PF Reset requested\n");
3081 /* schedule again to check later */
3082 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3083 hclge_reset_task_schedule(hdev);
3085 case HNAE3_FLR_RESET:
3086 dev_info(&pdev->dev, "FLR requested\n");
3087 /* schedule again to check later */
3088 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3089 hclge_reset_task_schedule(hdev);
3092 dev_warn(&pdev->dev,
3093 "Unsupported reset type: %d\n", hdev->reset_type);
3098 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3099 unsigned long *addr)
3101 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3103 /* first, resolve any unknown reset type to the known type(s) */
3104 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3105 /* we will intentionally ignore any errors from this function
3106 * as we will end up in *some* reset request in any case
3108 hclge_handle_hw_msix_error(hdev, addr);
3109 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3110 /* We deferred the clearing of the error event which caused the
3111 * interrupt since it was not possible to do that in
3112 * interrupt context (and this is the reason we introduced the
3113 * new UNKNOWN reset type). Now that the errors have been
3114 * handled and cleared in hardware, we can safely enable
3115 * interrupts. This is an exception to the norm.
3117 hclge_enable_vector(&hdev->misc_vector, true);
3120 /* return the highest priority reset level amongst all */
3121 if (test_bit(HNAE3_IMP_RESET, addr)) {
3122 rst_level = HNAE3_IMP_RESET;
3123 clear_bit(HNAE3_IMP_RESET, addr);
3124 clear_bit(HNAE3_GLOBAL_RESET, addr);
3125 clear_bit(HNAE3_CORE_RESET, addr);
3126 clear_bit(HNAE3_FUNC_RESET, addr);
3127 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3128 rst_level = HNAE3_GLOBAL_RESET;
3129 clear_bit(HNAE3_GLOBAL_RESET, addr);
3130 clear_bit(HNAE3_CORE_RESET, addr);
3131 clear_bit(HNAE3_FUNC_RESET, addr);
3132 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3133 rst_level = HNAE3_CORE_RESET;
3134 clear_bit(HNAE3_CORE_RESET, addr);
3135 clear_bit(HNAE3_FUNC_RESET, addr);
3136 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3137 rst_level = HNAE3_FUNC_RESET;
3138 clear_bit(HNAE3_FUNC_RESET, addr);
3139 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3140 rst_level = HNAE3_FLR_RESET;
3141 clear_bit(HNAE3_FLR_RESET, addr);
3144 if (hdev->reset_type != HNAE3_NONE_RESET &&
3145 rst_level < hdev->reset_type)
3146 return HNAE3_NONE_RESET;
3151 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3155 switch (hdev->reset_type) {
3156 case HNAE3_IMP_RESET:
3157 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3159 case HNAE3_GLOBAL_RESET:
3160 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3162 case HNAE3_CORE_RESET:
3163 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3172 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3173 hclge_enable_vector(&hdev->misc_vector, true);
3176 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3180 switch (hdev->reset_type) {
3181 case HNAE3_FUNC_RESET:
3183 case HNAE3_FLR_RESET:
3184 ret = hclge_set_all_vf_rst(hdev, true);
3193 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3198 switch (hdev->reset_type) {
3199 case HNAE3_FUNC_RESET:
3200 /* There is no mechanism for the PF to know if the VF has stopped IO
3201 * yet; for now, just wait 100 ms for the VF to stop IO
3204 ret = hclge_func_reset_cmd(hdev, 0);
3206 dev_err(&hdev->pdev->dev,
3207 "asserting function reset fail %d!\n", ret);
3211 /* After performing PF reset, it is not necessary to do the
3212 * mailbox handling or send any command to the firmware, because
3213 * any mailbox handling or command to the firmware is only valid
3214 * after hclge_cmd_init is called.
3216 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3217 hdev->rst_stats.pf_rst_cnt++;
3219 case HNAE3_FLR_RESET:
3220 /* There is no mechanism for the PF to know if the VF has stopped IO
3221 * yet; for now, just wait 100 ms for the VF to stop IO
3224 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3225 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3226 hdev->rst_stats.flr_rst_cnt++;
3228 case HNAE3_IMP_RESET:
3229 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3230 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3231 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3237 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3242 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3244 #define MAX_RESET_FAIL_CNT 5
3245 #define RESET_UPGRADE_DELAY_SEC 10
3247 if (hdev->reset_pending) {
3248 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3249 hdev->reset_pending);
3251 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3252 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3253 BIT(HCLGE_IMP_RESET_BIT))) {
3254 dev_info(&hdev->pdev->dev,
3255 "reset failed because IMP Reset is pending\n");
3256 hclge_clear_reset_cause(hdev);
3258 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3259 hdev->reset_fail_cnt++;
3261 set_bit(hdev->reset_type, &hdev->reset_pending);
3262 dev_info(&hdev->pdev->dev,
3263 "re-schedule to wait for hw reset done\n");
3267 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3268 hclge_clear_reset_cause(hdev);
3269 mod_timer(&hdev->reset_timer,
3270 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3275 hclge_clear_reset_cause(hdev);
3276 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3280 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3284 switch (hdev->reset_type) {
3285 case HNAE3_FUNC_RESET:
3287 case HNAE3_FLR_RESET:
3288 ret = hclge_set_all_vf_rst(hdev, false);
3297 static void hclge_reset(struct hclge_dev *hdev)
3299 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3300 bool is_timeout = false;
3303 /* Initialize ae_dev reset status as well, in case enet layer wants to
3304 * know if device is undergoing reset
3306 ae_dev->reset_type = hdev->reset_type;
3307 hdev->rst_stats.reset_cnt++;
3308 /* perform reset of the stack & ae device for a client */
3309 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3313 ret = hclge_reset_prepare_down(hdev);
3318 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3320 goto err_reset_lock;
3324 ret = hclge_reset_prepare_wait(hdev);
3328 if (hclge_reset_wait(hdev)) {
3333 hdev->rst_stats.hw_reset_done_cnt++;
3335 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3340 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3342 goto err_reset_lock;
3344 ret = hclge_reset_ae_dev(hdev->ae_dev);
3346 goto err_reset_lock;
3348 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3350 goto err_reset_lock;
3352 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3354 goto err_reset_lock;
3356 hclge_clear_reset_cause(hdev);
3358 ret = hclge_reset_prepare_up(hdev);
3360 goto err_reset_lock;
3362 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3364 goto err_reset_lock;
3368 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3372 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3376 hdev->last_reset_time = jiffies;
3377 hdev->reset_fail_cnt = 0;
3378 hdev->rst_stats.reset_done_cnt++;
3379 ae_dev->reset_type = HNAE3_NONE_RESET;
3380 del_timer(&hdev->reset_timer);
3387 if (hclge_reset_err_handle(hdev, is_timeout))
3388 hclge_reset_task_schedule(hdev);
3391 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3393 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3394 struct hclge_dev *hdev = ae_dev->priv;
3396 /* We might end up getting called broadly because of the 2 cases below:
3397 * 1. Recoverable error was conveyed through APEI and the only way to
3398 * restore normalcy is to reset.
3399 * 2. A new reset request from the stack due to timeout
3401 * For the first case, the error event might not have an ae handle
3402 * available. Check if this is a new reset request and we are not here
3403 * just because the last reset attempt did not succeed and the watchdog
3404 * hit us again. We will know this if the last reset request did not
3405 * occur very recently (watchdog timer = 5*HZ; let us check after a
3406 * sufficiently long time, say 4*5*HZ). In case of a new request we
3407 * reset the "reset level" to PF reset. And if it is a repeat request
3408 * of the most recent one then we want to throttle it; therefore, we
3409 * will not allow it again before 3*HZ has elapsed.
3412 handle = &hdev->vport[0].nic;
3414 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3416 else if (hdev->default_reset_request)
3418 hclge_get_reset_level(hdev,
3419 &hdev->default_reset_request);
3420 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3421 hdev->reset_level = HNAE3_FUNC_RESET;
3423 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3426 /* request reset & schedule reset task */
3427 set_bit(hdev->reset_level, &hdev->reset_request);
3428 hclge_reset_task_schedule(hdev);
3430 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3431 hdev->reset_level++;
3434 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3435 enum hnae3_reset_type rst_type)
3437 struct hclge_dev *hdev = ae_dev->priv;
3439 set_bit(rst_type, &hdev->default_reset_request);
3442 static void hclge_reset_timer(struct timer_list *t)
3444 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3446 dev_info(&hdev->pdev->dev,
3447 "triggering global reset in reset timer\n");
3448 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3449 hclge_reset_event(hdev->pdev, NULL);
3452 static void hclge_reset_subtask(struct hclge_dev *hdev)
3454 /* check if there is any ongoing reset in the hardware. This status can
3455 * be checked from reset_pending. If there is, then we need to wait for
3456 * the hardware to complete the reset.
3457 * a. If we are able to figure out in reasonable time that the hardware
3458 * has fully reset, then we can proceed with the driver, client
3460 * b. else, we can come back later to check this status, so re-sched
3463 hdev->last_reset_time = jiffies;
3464 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3465 if (hdev->reset_type != HNAE3_NONE_RESET)
3468 /* check if we got any *new* reset requests to be honored */
3469 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3470 if (hdev->reset_type != HNAE3_NONE_RESET)
3471 hclge_do_reset(hdev);
3473 hdev->reset_type = HNAE3_NONE_RESET;
3476 static void hclge_reset_service_task(struct work_struct *work)
3478 struct hclge_dev *hdev =
3479 container_of(work, struct hclge_dev, rst_service_task);
3481 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3484 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3486 hclge_reset_subtask(hdev);
3488 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3491 static void hclge_mailbox_service_task(struct work_struct *work)
3493 struct hclge_dev *hdev =
3494 container_of(work, struct hclge_dev, mbx_service_task);
3496 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3499 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3501 hclge_mbx_handler(hdev);
3503 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3506 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3510 /* start from vport 1 since the PF is always alive */
3511 for (i = 1; i < hdev->num_alloc_vport; i++) {
3512 struct hclge_vport *vport = &hdev->vport[i];
3514 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3515 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3517 /* If the VF is not alive, reset its MPS to the default value */
3518 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3519 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3523 static void hclge_service_task(struct work_struct *work)
3525 struct hclge_dev *hdev =
3526 container_of(work, struct hclge_dev, service_task);
3528 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3529 hclge_update_stats_for_all(hdev);
3530 hdev->hw_stats.stats_timer = 0;
3533 hclge_update_port_info(hdev);
3534 hclge_update_link_status(hdev);
3535 hclge_update_vport_alive(hdev);
3536 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3537 hclge_rfs_filter_expire(hdev);
3538 hdev->fd_arfs_expire_timer = 0;
3540 hclge_service_complete(hdev);
3543 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3545 /* VF handle has no client */
3546 if (!handle->client)
3547 return container_of(handle, struct hclge_vport, nic);
3548 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3549 return container_of(handle, struct hclge_vport, roce);
3551 return container_of(handle, struct hclge_vport, nic);
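/* Vector 0 is reserved for the misc interrupt, so the allocator below
 * searches from index 1; each vector's doorbell address is derived
 * from HCLGE_VECTOR_REG_BASE with a per-vector stride of
 * HCLGE_VECTOR_REG_OFFSET.
 */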
3554 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3555 struct hnae3_vector_info *vector_info)
3557 struct hclge_vport *vport = hclge_get_vport(handle);
3558 struct hnae3_vector_info *vector = vector_info;
3559 struct hclge_dev *hdev = vport->back;
3563 vector_num = min(hdev->num_msi_left, vector_num);
3565 for (j = 0; j < vector_num; j++) {
3566 for (i = 1; i < hdev->num_msi; i++) {
3567 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3568 vector->vector = pci_irq_vector(hdev->pdev, i);
3569 vector->io_addr = hdev->hw.io_base +
3570 HCLGE_VECTOR_REG_BASE +
3571 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3573 HCLGE_VECTOR_VF_OFFSET;
3574 hdev->vector_status[i] = vport->vport_id;
3575 hdev->vector_irq[i] = vector->vector;
3584 hdev->num_msi_left -= alloc;
3585 hdev->num_msi_used += alloc;
3590 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3594 for (i = 0; i < hdev->num_msi; i++)
3595 if (vector == hdev->vector_irq[i])
3601 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3603 struct hclge_vport *vport = hclge_get_vport(handle);
3604 struct hclge_dev *hdev = vport->back;
3607 vector_id = hclge_get_vector_index(hdev, vector);
3608 if (vector_id < 0) {
3609 dev_err(&hdev->pdev->dev,
3610 "Get vector index fail. vector_id =%d\n", vector_id);
3614 hclge_free_vector(hdev, vector_id);
3619 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3621 return HCLGE_RSS_KEY_SIZE;
3624 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3626 return HCLGE_RSS_IND_TBL_SIZE;
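/* The RSS hash key does not fit in one descriptor, so it is written in
 * three chunks of HCLGE_RSS_HASH_KEY_NUM bytes each, with the final
 * chunk shortened to whatever remains. Assuming the usual 40-byte key
 * and 16-byte chunks, that is 16 + 16 + 8 bytes.
 */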
3629 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3630 const u8 hfunc, const u8 *key)
3632 struct hclge_rss_config_cmd *req;
3633 struct hclge_desc desc;
3638 req = (struct hclge_rss_config_cmd *)desc.data;
3640 for (key_offset = 0; key_offset < 3; key_offset++) {
3641 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3644 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3645 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3647 if (key_offset == 2)
3649 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3651 key_size = HCLGE_RSS_HASH_KEY_NUM;
3653 memcpy(req->hash_key,
3654 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3656 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3658 dev_err(&hdev->pdev->dev,
3659 "Configure RSS config fail, status = %d\n",
3667 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3669 struct hclge_rss_indirection_table_cmd *req;
3670 struct hclge_desc desc;
3674 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3676 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3677 hclge_cmd_setup_basic_desc
3678 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3680 req->start_table_index =
3681 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3682 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3684 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3685 req->rss_result[j] =
3686 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3690 dev_err(&hdev->pdev->dev,
3691 "Configure rss indir table fail,status = %d\n",
3699 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3700 u16 *tc_size, u16 *tc_offset)
3702 struct hclge_rss_tc_mode_cmd *req;
3703 struct hclge_desc desc;
3707 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3708 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3710 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3713 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3714 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3715 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3716 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3717 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3719 req->rss_tc_mode[i] = cpu_to_le16(mode);
3722 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3724 dev_err(&hdev->pdev->dev,
3725 "Configure rss tc mode fail, status = %d\n", ret);
3730 static void hclge_get_rss_type(struct hclge_vport *vport)
3732 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3733 vport->rss_tuple_sets.ipv4_udp_en ||
3734 vport->rss_tuple_sets.ipv4_sctp_en ||
3735 vport->rss_tuple_sets.ipv6_tcp_en ||
3736 vport->rss_tuple_sets.ipv6_udp_en ||
3737 vport->rss_tuple_sets.ipv6_sctp_en)
3738 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3739 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3740 vport->rss_tuple_sets.ipv6_fragment_en)
3741 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3743 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3746 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3748 struct hclge_rss_input_tuple_cmd *req;
3749 struct hclge_desc desc;
3752 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3754 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3756 /* Get the tuple cfg from the PF */
3757 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3758 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3759 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3760 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3761 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3762 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3763 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3764 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3765 hclge_get_rss_type(&hdev->vport[0]);
3766 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3768 dev_err(&hdev->pdev->dev,
3769 "Configure rss input fail, status = %d\n", ret);
3773 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3776 struct hclge_vport *vport = hclge_get_vport(handle);
3779 /* Get hash algorithm */
3781 switch (vport->rss_algo) {
3782 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3783 *hfunc = ETH_RSS_HASH_TOP;
3785 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3786 *hfunc = ETH_RSS_HASH_XOR;
3789 *hfunc = ETH_RSS_HASH_UNKNOWN;
3794 /* Get the RSS Key required by the user */
3796 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3798 /* Get indirect table */
3800 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3801 indir[i] = vport->rss_indirection_tbl[i];
3806 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3807 const u8 *key, const u8 hfunc)
3809 struct hclge_vport *vport = hclge_get_vport(handle);
3810 struct hclge_dev *hdev = vport->back;
3814 /* Set the RSS Hash Key if specified by the user */
3817 case ETH_RSS_HASH_TOP:
3818 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3820 case ETH_RSS_HASH_XOR:
3821 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3823 case ETH_RSS_HASH_NO_CHANGE:
3824 hash_algo = vport->rss_algo;
3830 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3834 /* Update the shadow RSS key with the user specified key */
3835 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3836 vport->rss_algo = hash_algo;
3839 /* Update the shadow RSS table with user specified qids */
3840 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3841 vport->rss_indirection_tbl[i] = indir[i];
3843 /* Update the hardware */
3844 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3847 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3849 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3851 if (nfc->data & RXH_L4_B_2_3)
3852 hash_sets |= HCLGE_D_PORT_BIT;
3854 hash_sets &= ~HCLGE_D_PORT_BIT;
3856 if (nfc->data & RXH_IP_SRC)
3857 hash_sets |= HCLGE_S_IP_BIT;
3859 hash_sets &= ~HCLGE_S_IP_BIT;
3861 if (nfc->data & RXH_IP_DST)
3862 hash_sets |= HCLGE_D_IP_BIT;
3864 hash_sets &= ~HCLGE_D_IP_BIT;
3866 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3867 hash_sets |= HCLGE_V_TAG_BIT;
3872 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3873 struct ethtool_rxnfc *nfc)
3875 struct hclge_vport *vport = hclge_get_vport(handle);
3876 struct hclge_dev *hdev = vport->back;
3877 struct hclge_rss_input_tuple_cmd *req;
3878 struct hclge_desc desc;
3882 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3883 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3886 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3887 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3889 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3890 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3891 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3892 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3893 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3894 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3895 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3896 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3898 tuple_sets = hclge_get_rss_hash_bits(nfc);
3899 switch (nfc->flow_type) {
3901 req->ipv4_tcp_en = tuple_sets;
3904 req->ipv6_tcp_en = tuple_sets;
3907 req->ipv4_udp_en = tuple_sets;
3910 req->ipv6_udp_en = tuple_sets;
3913 req->ipv4_sctp_en = tuple_sets;
3916 if ((nfc->data & RXH_L4_B_0_1) ||
3917 (nfc->data & RXH_L4_B_2_3))
3920 req->ipv6_sctp_en = tuple_sets;
3923 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3926 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3932 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3934 dev_err(&hdev->pdev->dev,
3935 "Set rss tuple fail, status = %d\n", ret);
3939 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3940 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3941 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3942 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3943 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3944 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3945 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3946 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3947 hclge_get_rss_type(vport);
3951 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3952 struct ethtool_rxnfc *nfc)
3954 struct hclge_vport *vport = hclge_get_vport(handle);
3959 switch (nfc->flow_type) {
3961 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3964 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3967 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3970 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3973 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3976 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3980 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3989 if (tuple_sets & HCLGE_D_PORT_BIT)
3990 nfc->data |= RXH_L4_B_2_3;
3991 if (tuple_sets & HCLGE_S_PORT_BIT)
3992 nfc->data |= RXH_L4_B_0_1;
3993 if (tuple_sets & HCLGE_D_IP_BIT)
3994 nfc->data |= RXH_IP_DST;
3995 if (tuple_sets & HCLGE_S_IP_BIT)
3996 nfc->data |= RXH_IP_SRC;
4001 static int hclge_get_tc_size(struct hnae3_handle *handle)
4003 struct hclge_vport *vport = hclge_get_vport(handle);
4004 struct hclge_dev *hdev = vport->back;
4006 return hdev->rss_size_max;
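/* Worked example for the tc_size calculation below: with rss_size = 16,
 * roundup_pow_of_two(16) = 16 and ilog2(16) = 4, so the hardware is
 * given 4 as the per-TC size exponent while tc_offset[i] = 16 * i.
 */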
4009 int hclge_rss_init_hw(struct hclge_dev *hdev)
4011 struct hclge_vport *vport = hdev->vport;
4012 u8 *rss_indir = vport[0].rss_indirection_tbl;
4013 u16 rss_size = vport[0].alloc_rss_size;
4014 u8 *key = vport[0].rss_hash_key;
4015 u8 hfunc = vport[0].rss_algo;
4016 u16 tc_offset[HCLGE_MAX_TC_NUM];
4017 u16 tc_valid[HCLGE_MAX_TC_NUM];
4018 u16 tc_size[HCLGE_MAX_TC_NUM];
4022 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4026 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4030 ret = hclge_set_rss_input_tuple(hdev);
4034 /* Each TC has the same queue size, and the tc_size set to hardware is
4035 * the log2 of rss_size rounded up to a power of two; the actual queue
4036 * size is limited by the indirection table.
4038 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4039 dev_err(&hdev->pdev->dev,
4040 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4045 roundup_size = roundup_pow_of_two(rss_size);
4046 roundup_size = ilog2(roundup_size);
4048 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4051 if (!(hdev->hw_tc_map & BIT(i)))
4055 tc_size[i] = roundup_size;
4056 tc_offset[i] = rss_size * i;
4059 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
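/* The default indirection table below is a simple round-robin over the
 * allocated RSS queues: with alloc_rss_size = 4 the table reads
 * 0, 1, 2, 3, 0, 1, 2, 3, ... for all HCLGE_RSS_IND_TBL_SIZE entries.
 */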
4062 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4064 struct hclge_vport *vport = hdev->vport;
4067 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4068 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4069 vport[j].rss_indirection_tbl[i] =
4070 i % vport[j].alloc_rss_size;
4074 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4076 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4077 struct hclge_vport *vport = hdev->vport;
4079 if (hdev->pdev->revision >= 0x21)
4080 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4082 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4083 vport[i].rss_tuple_sets.ipv4_tcp_en =
4084 HCLGE_RSS_INPUT_TUPLE_OTHER;
4085 vport[i].rss_tuple_sets.ipv4_udp_en =
4086 HCLGE_RSS_INPUT_TUPLE_OTHER;
4087 vport[i].rss_tuple_sets.ipv4_sctp_en =
4088 HCLGE_RSS_INPUT_TUPLE_SCTP;
4089 vport[i].rss_tuple_sets.ipv4_fragment_en =
4090 HCLGE_RSS_INPUT_TUPLE_OTHER;
4091 vport[i].rss_tuple_sets.ipv6_tcp_en =
4092 HCLGE_RSS_INPUT_TUPLE_OTHER;
4093 vport[i].rss_tuple_sets.ipv6_udp_en =
4094 HCLGE_RSS_INPUT_TUPLE_OTHER;
4095 vport[i].rss_tuple_sets.ipv6_sctp_en =
4096 HCLGE_RSS_INPUT_TUPLE_SCTP;
4097 vport[i].rss_tuple_sets.ipv6_fragment_en =
4098 HCLGE_RSS_INPUT_TUPLE_OTHER;
4100 vport[i].rss_algo = rss_algo;
4102 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4103 HCLGE_RSS_KEY_SIZE);
4106 hclge_rss_indir_init_cfg(hdev);
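/* Ring-to-vector bindings are batched: each descriptor holds up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, and when a descriptor
 * fills up it is sent and a fresh one is started for the remaining
 * rings in the chain.
 */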
4109 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4110 int vector_id, bool en,
4111 struct hnae3_ring_chain_node *ring_chain)
4113 struct hclge_dev *hdev = vport->back;
4114 struct hnae3_ring_chain_node *node;
4115 struct hclge_desc desc;
4116 struct hclge_ctrl_vector_chain_cmd *req
4117 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4118 enum hclge_cmd_status status;
4119 enum hclge_opcode_type op;
4120 u16 tqp_type_and_id;
4123 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4124 hclge_cmd_setup_basic_desc(&desc, op, false);
4125 req->int_vector_id = vector_id;
4128 for (node = ring_chain; node; node = node->next) {
4129 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4130 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4132 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4133 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4134 HCLGE_TQP_ID_S, node->tqp_index);
4135 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4137 hnae3_get_field(node->int_gl_idx,
4138 HNAE3_RING_GL_IDX_M,
4139 HNAE3_RING_GL_IDX_S));
4140 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4141 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4142 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4143 req->vfid = vport->vport_id;
4145 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4147 dev_err(&hdev->pdev->dev,
4148 "Map TQP fail, status is %d.\n",
4154 hclge_cmd_setup_basic_desc(&desc,
4157 req->int_vector_id = vector_id;
4162 req->int_cause_num = i;
4163 req->vfid = vport->vport_id;
4164 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4166 dev_err(&hdev->pdev->dev,
4167 "Map TQP fail, status is %d.\n", status);
4175 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4177 struct hnae3_ring_chain_node *ring_chain)
4179 struct hclge_vport *vport = hclge_get_vport(handle);
4180 struct hclge_dev *hdev = vport->back;
4183 vector_id = hclge_get_vector_index(hdev, vector);
4184 if (vector_id < 0) {
4185 dev_err(&hdev->pdev->dev,
4186 "Get vector index fail. vector_id =%d\n", vector_id);
4190 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4193 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4195 struct hnae3_ring_chain_node *ring_chain)
4197 struct hclge_vport *vport = hclge_get_vport(handle);
4198 struct hclge_dev *hdev = vport->back;
4201 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4204 vector_id = hclge_get_vector_index(hdev, vector);
4205 if (vector_id < 0) {
4206 dev_err(&handle->pdev->dev,
4207 "Get vector index fail. ret =%d\n", vector_id);
4211 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4213 dev_err(&handle->pdev->dev,
4214 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4221 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4222 struct hclge_promisc_param *param)
4224 struct hclge_promisc_cfg_cmd *req;
4225 struct hclge_desc desc;
4228 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4230 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4231 req->vf_id = param->vf_id;
4233 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4234 * pdev revision(0x20); newer revisions support them. Setting
4235 * these two fields does not return an error when the driver
4236 * sends the command to the firmware on revision(0x20).
4238 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4239 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4241 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4243 dev_err(&hdev->pdev->dev,
4244 "Set promisc mode fail, status is %d.\n", ret);
4249 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4250 bool en_mc, bool en_bc, int vport_id)
4255 memset(param, 0, sizeof(struct hclge_promisc_param));
4257 param->enable = HCLGE_PROMISC_EN_UC;
4259 param->enable |= HCLGE_PROMISC_EN_MC;
4261 param->enable |= HCLGE_PROMISC_EN_BC;
4262 param->vf_id = vport_id;
4265 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4268 struct hclge_vport *vport = hclge_get_vport(handle);
4269 struct hclge_dev *hdev = vport->back;
4270 struct hclge_promisc_param param;
4271 bool en_bc_pmc = true;
4273 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4274 * is always bypassed. So broadcast promisc should be disabled until
4275 * the user enables promisc mode
4277 if (handle->pdev->revision == 0x20)
4278 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4280 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4282 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4285 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4287 struct hclge_get_fd_mode_cmd *req;
4288 struct hclge_desc desc;
4291 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4293 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4295 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4297 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4301 *fd_mode = req->mode;
4306 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4307 u32 *stage1_entry_num,
4308 u32 *stage2_entry_num,
4309 u16 *stage1_counter_num,
4310 u16 *stage2_counter_num)
4312 struct hclge_get_fd_allocation_cmd *req;
4313 struct hclge_desc desc;
4316 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4318 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4320 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4322 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4327 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4328 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4329 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4330 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4335 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4337 struct hclge_set_fd_key_config_cmd *req;
4338 struct hclge_fd_key_cfg *stage;
4339 struct hclge_desc desc;
4342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4344 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4345 stage = &hdev->fd_cfg.key_cfg[stage_num];
4346 req->stage = stage_num;
4347 req->key_select = stage->key_sel;
4348 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4349 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4350 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4351 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4352 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4353 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4355 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4357 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4362 static int hclge_init_fd_config(struct hclge_dev *hdev)
4364 #define LOW_2_WORDS 0x03
4365 struct hclge_fd_key_cfg *key_cfg;
4368 if (!hnae3_dev_fd_supported(hdev))
4371 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4375 switch (hdev->fd_cfg.fd_mode) {
4376 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4377 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4379 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4380 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4383 dev_err(&hdev->pdev->dev,
4384 "Unsupported flow director mode %d\n",
4385 hdev->fd_cfg.fd_mode);
4389 hdev->fd_cfg.proto_support =
4390 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4391 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4392 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4393 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4394 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4395 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4396 key_cfg->outer_sipv6_word_en = 0;
4397 key_cfg->outer_dipv6_word_en = 0;
4399 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4400 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4401 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4402 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4404 /* If using the max 400-bit key, we can also support tuples for ether type */
4405 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4406 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4407 key_cfg->tuple_active |=
4408 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4411 /* roce_type is used to filter roce frames
4412 * dst_vport is used to specify the rule
4414 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4416 ret = hclge_get_fd_allocation(hdev,
4417 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4418 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4419 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4420 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4424 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
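/* A TCAM entry is wider than one descriptor, so the key below is split
 * across three chained descriptors, filling req1->tcam_data, then
 * req2->tcam_data, then req3->tcam_data back to back.
 */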
4427 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4428 int loc, u8 *key, bool is_add)
4430 struct hclge_fd_tcam_config_1_cmd *req1;
4431 struct hclge_fd_tcam_config_2_cmd *req2;
4432 struct hclge_fd_tcam_config_3_cmd *req3;
4433 struct hclge_desc desc[3];
4436 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4437 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4438 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4439 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4440 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4442 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4443 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4444 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4446 req1->stage = stage;
4447 req1->xy_sel = sel_x ? 1 : 0;
4448 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4449 req1->index = cpu_to_le32(loc);
4450 req1->entry_vld = sel_x ? is_add : 0;
4453 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4454 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4455 sizeof(req2->tcam_data));
4456 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4457 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4460 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4462 dev_err(&hdev->pdev->dev,
4463 "config tcam key fail, ret=%d\n",
4469 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4470 struct hclge_fd_ad_data *action)
4472 struct hclge_fd_ad_config_cmd *req;
4473 struct hclge_desc desc;
4477 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4479 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4480 req->index = cpu_to_le32(loc);
4483 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4484 action->write_rule_id_to_bd);
4485 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4488 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4489 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4490 action->forward_to_direct_queue);
4491 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4493 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4494 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4495 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4496 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4497 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4498 action->counter_id);
4500 req->ad_data = cpu_to_le64(ad_data);
4501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4503 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4508 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4509 struct hclge_fd_rule *rule)
4511 u16 tmp_x_s, tmp_y_s;
4512 u32 tmp_x_l, tmp_y_l;
4515 if (rule->unused_tuple & tuple_bit)
4518 switch (tuple_bit) {
4521 case BIT(INNER_DST_MAC):
4522 for (i = 0; i < 6; i++) {
4523 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4524 rule->tuples_mask.dst_mac[i]);
4525 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4526 rule->tuples_mask.dst_mac[i]);
4530 case BIT(INNER_SRC_MAC):
4531 for (i = 0; i < 6; i++) {
4532 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4533 rule->tuples_mask.src_mac[i]);
4534 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4535 rule->tuples_mask.src_mac[i]);
4539 case BIT(INNER_VLAN_TAG_FST):
4540 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4541 rule->tuples_mask.vlan_tag1);
4542 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4543 rule->tuples_mask.vlan_tag1);
4544 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4545 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4548 case BIT(INNER_ETH_TYPE):
4549 calc_x(tmp_x_s, rule->tuples.ether_proto,
4550 rule->tuples_mask.ether_proto);
4551 calc_y(tmp_y_s, rule->tuples.ether_proto,
4552 rule->tuples_mask.ether_proto);
4553 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4554 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4557 case BIT(INNER_IP_TOS):
4558 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4559 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4562 case BIT(INNER_IP_PROTO):
4563 calc_x(*key_x, rule->tuples.ip_proto,
4564 rule->tuples_mask.ip_proto);
4565 calc_y(*key_y, rule->tuples.ip_proto,
4566 rule->tuples_mask.ip_proto);
4569 case BIT(INNER_SRC_IP):
4570 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4571 rule->tuples_mask.src_ip[3]);
4572 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4573 rule->tuples_mask.src_ip[3]);
4574 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4575 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4578 case BIT(INNER_DST_IP):
4579 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4580 rule->tuples_mask.dst_ip[3]);
4581 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4582 rule->tuples_mask.dst_ip[3]);
4583 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4584 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4587 case BIT(INNER_SRC_PORT):
4588 calc_x(tmp_x_s, rule->tuples.src_port,
4589 rule->tuples_mask.src_port);
4590 calc_y(tmp_y_s, rule->tuples.src_port,
4591 rule->tuples_mask.src_port);
4592 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4593 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4596 case BIT(INNER_DST_PORT):
4597 calc_x(tmp_x_s, rule->tuples.dst_port,
4598 rule->tuples_mask.dst_port);
4599 calc_y(tmp_y_s, rule->tuples.dst_port,
4600 rule->tuples_mask.dst_port);
4601 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4602 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
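
/* Background sketch (illustrative, not driver code): x/y TCAMs store each
 * key as two bitmaps derived from a data/mask pair so that masked-out bits
 * become "don't care". One common encoding is shown below; the driver's
 * calc_x()/calc_y() macros, defined elsewhere in this file, may use the
 * complementary form:
 */
static void __maybe_unused tcam_xy_encode_byte(u8 data, u8 mask, u8 *x, u8 *y)
{
	*x = data & mask;	/* bits that must be 1 to match */
	*y = ~data & mask;	/* bits that must be 0 to match */
	/* a cleared mask bit yields x == y == 0, i.e. "don't care" */
}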
4610 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4611 u8 vf_id, u8 network_port_id)
4613 u32 port_number = 0;
4615 if (port_type == HOST_PORT) {
4616 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4618 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4620 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4622 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4623 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4624 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4630 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4631 __le32 *key_x, __le32 *key_y,
4632 struct hclge_fd_rule *rule)
4634 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4635 u8 cur_pos = 0, tuple_size, shift_bits;
4638 for (i = 0; i < MAX_META_DATA; i++) {
4639 tuple_size = meta_data_key_info[i].key_length;
4640 tuple_bit = key_cfg->meta_data_active & BIT(i);
4642 switch (tuple_bit) {
4643 case BIT(ROCE_TYPE):
4644 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4645 cur_pos += tuple_size;
4647 case BIT(DST_VPORT):
4648 port_number = hclge_get_port_number(HOST_PORT, 0,
4650 hnae3_set_field(meta_data,
4651 GENMASK(cur_pos + tuple_size, cur_pos),
4652 cur_pos, port_number);
4653 cur_pos += tuple_size;
4660 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4661 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4662 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4664 *key_x = cpu_to_le32(tmp_x << shift_bits);
4665 *key_y = cpu_to_le32(tmp_y << shift_bits);
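
/* Worked example of the final shift above: with cur_pos == 16 meta data
 * bits in use, shift_bits = 32 - 16 = 16, so tmp_x/tmp_y move into the
 * upper half of the 32-bit word. The meta data key is therefore always
 * aligned to the MSB end of its region, matching the layout described in
 * the comment below.
 */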
4668 /* A complete key is combined from the meta data key and the tuple key.
4669 * The meta data key is stored in the MSB region, the tuple key in the
4670 * LSB region, and unused bits are filled with 0.
4671 */
4672 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4673 struct hclge_fd_rule *rule)
4675 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4676 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4677 u8 *cur_key_x, *cur_key_y;
4678 int i, ret, tuple_size;
4679 u8 meta_data_region;
4681 memset(key_x, 0, sizeof(key_x));
4682 memset(key_y, 0, sizeof(key_y));
4686 for (i = 0 ; i < MAX_TUPLE; i++) {
4690 tuple_size = tuple_key_info[i].key_length / 8;
4691 check_tuple = key_cfg->tuple_active & BIT(i);
4693 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4696 cur_key_x += tuple_size;
4697 cur_key_y += tuple_size;
4701 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4702 MAX_META_DATA_LENGTH / 8;
4704 hclge_fd_convert_meta_data(key_cfg,
4705 (__le32 *)(key_x + meta_data_region),
4706 (__le32 *)(key_y + meta_data_region),
4709 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4712 dev_err(&hdev->pdev->dev,
4713 "fd key_y config fail, loc=%d, ret=%d\n",
4714 rule->location, ret);
4718 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4721 dev_err(&hdev->pdev->dev,
4722 "fd key_x config fail, loc=%d, ret=%d\n",
4723 rule->location, ret);
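
/* Worked example of the key layout (assuming a 400-bit key, as the
 * "WIDTH_400B" mode name suggests, and a 32-bit meta data region, as the
 * u32 meta_data word above suggests): the key buffer is 400 / 8 = 50 bytes
 * and meta_data_region = 50 - 4 = 46, so tuple bytes grow upward from
 * offset 0 while the meta data key occupies bytes 46..49 at the MSB end;
 * anything in between stays zero.
 */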
4727 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4728 struct hclge_fd_rule *rule)
4730 struct hclge_fd_ad_data ad_data;
4732 ad_data.ad_id = rule->location;
4734 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4735 ad_data.drop_packet = true;
4736 ad_data.forward_to_direct_queue = false;
4737 ad_data.queue_id = 0;
4739 ad_data.drop_packet = false;
4740 ad_data.forward_to_direct_queue = true;
4741 ad_data.queue_id = rule->queue_id;
4744 ad_data.use_counter = false;
4745 ad_data.counter_id = 0;
4747 ad_data.use_next_stage = false;
4748 ad_data.next_input_key = 0;
4750 ad_data.write_rule_id_to_bd = true;
4751 ad_data.rule_id = rule->location;
4753 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4756 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4757 struct ethtool_rx_flow_spec *fs, u32 *unused)
4759 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4760 struct ethtool_usrip4_spec *usr_ip4_spec;
4761 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4762 struct ethtool_usrip6_spec *usr_ip6_spec;
4763 struct ethhdr *ether_spec;
4765 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4768 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4771 if ((fs->flow_type & FLOW_EXT) &&
4772 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4773 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4777 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4781 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4782 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4784 if (!tcp_ip4_spec->ip4src)
4785 *unused |= BIT(INNER_SRC_IP);
4787 if (!tcp_ip4_spec->ip4dst)
4788 *unused |= BIT(INNER_DST_IP);
4790 if (!tcp_ip4_spec->psrc)
4791 *unused |= BIT(INNER_SRC_PORT);
4793 if (!tcp_ip4_spec->pdst)
4794 *unused |= BIT(INNER_DST_PORT);
4796 if (!tcp_ip4_spec->tos)
4797 *unused |= BIT(INNER_IP_TOS);
4801 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4802 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4803 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4805 if (!usr_ip4_spec->ip4src)
4806 *unused |= BIT(INNER_SRC_IP);
4808 if (!usr_ip4_spec->ip4dst)
4809 *unused |= BIT(INNER_DST_IP);
4811 if (!usr_ip4_spec->tos)
4812 *unused |= BIT(INNER_IP_TOS);
4814 if (!usr_ip4_spec->proto)
4815 *unused |= BIT(INNER_IP_PROTO);
4817 if (usr_ip4_spec->l4_4_bytes)
4820 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4827 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4828 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4831 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4832 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4833 *unused |= BIT(INNER_SRC_IP);
4835 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4836 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4837 *unused |= BIT(INNER_DST_IP);
4839 if (!tcp_ip6_spec->psrc)
4840 *unused |= BIT(INNER_SRC_PORT);
4842 if (!tcp_ip6_spec->pdst)
4843 *unused |= BIT(INNER_DST_PORT);
4845 if (tcp_ip6_spec->tclass)
4849 case IPV6_USER_FLOW:
4850 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4851 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4852 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4853 BIT(INNER_DST_PORT);
4855 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4856 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4857 *unused |= BIT(INNER_SRC_IP);
4859 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4860 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4861 *unused |= BIT(INNER_DST_IP);
4863 if (!usr_ip6_spec->l4_proto)
4864 *unused |= BIT(INNER_IP_PROTO);
4866 if (usr_ip6_spec->tclass)
4869 if (usr_ip6_spec->l4_4_bytes)
4874 ether_spec = &fs->h_u.ether_spec;
4875 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4876 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4877 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4879 if (is_zero_ether_addr(ether_spec->h_source))
4880 *unused |= BIT(INNER_SRC_MAC);
4882 if (is_zero_ether_addr(ether_spec->h_dest))
4883 *unused |= BIT(INNER_DST_MAC);
4885 if (!ether_spec->h_proto)
4886 *unused |= BIT(INNER_ETH_TYPE);
4893 if (fs->flow_type & FLOW_EXT) {
4894 if (fs->h_ext.vlan_etype)
4896 if (!fs->h_ext.vlan_tci)
4897 *unused |= BIT(INNER_VLAN_TAG_FST);
4899 if (fs->m_ext.vlan_tci) {
4900 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4904 *unused |= BIT(INNER_VLAN_TAG_FST);
4907 if (fs->flow_type & FLOW_MAC_EXT) {
4908 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4911 if (is_zero_ether_addr(fs->h_ext.h_dest))
4912 *unused |= BIT(INNER_DST_MAC);
4914 *unused &= ~(BIT(INNER_DST_MAC));
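
/* Example of a spec the checks above accept, issued from user space with
 * the standard ethtool syntax (device name and values are illustrative):
 *
 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 \
 *		action 3 loc 1
 *
 * It arrives here as an ethtool_rx_flow_spec with flow_type TCP_V4_FLOW,
 * h_u.tcp_ip4_spec.ip4src and .pdst set, fs->location == 1, and every
 * untouched field reported back through *unused as a don't-care tuple.
 */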
4920 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4922 struct hclge_fd_rule *rule = NULL;
4923 struct hlist_node *node2;
4925 spin_lock_bh(&hdev->fd_rule_lock);
4926 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4927 if (rule->location >= location)
4931 spin_unlock_bh(&hdev->fd_rule_lock);
4933 return rule && rule->location == location;
4936 /* must be called with fd_rule_lock held */
4937 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4938 struct hclge_fd_rule *new_rule,
4942 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4943 struct hlist_node *node2;
4945 if (is_add && !new_rule)
4948 hlist_for_each_entry_safe(rule, node2,
4949 &hdev->fd_rule_list, rule_node) {
4950 if (rule->location >= location)
4955 if (rule && rule->location == location) {
4956 hlist_del(&rule->rule_node);
4958 hdev->hclge_fd_rule_num--;
4961 if (!hdev->hclge_fd_rule_num)
4962 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4963 clear_bit(location, hdev->fd_bmap);
4967 } else if (!is_add) {
4968 dev_err(&hdev->pdev->dev,
4969 "delete fail, rule %d is inexistent\n",
4974 INIT_HLIST_NODE(&new_rule->rule_node);
4977 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4979 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4981 set_bit(location, hdev->fd_bmap);
4982 hdev->hclge_fd_rule_num++;
4983 hdev->fd_active_type = new_rule->rule_type;
4988 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4989 struct ethtool_rx_flow_spec *fs,
4990 struct hclge_fd_rule *rule)
4992 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4994 switch (flow_type) {
4998 rule->tuples.src_ip[3] =
4999 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5000 rule->tuples_mask.src_ip[3] =
5001 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5003 rule->tuples.dst_ip[3] =
5004 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5005 rule->tuples_mask.dst_ip[3] =
5006 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5008 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5009 rule->tuples_mask.src_port =
5010 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5012 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5013 rule->tuples_mask.dst_port =
5014 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5016 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5017 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5019 rule->tuples.ether_proto = ETH_P_IP;
5020 rule->tuples_mask.ether_proto = 0xFFFF;
5024 rule->tuples.src_ip[3] =
5025 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5026 rule->tuples_mask.src_ip[3] =
5027 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5029 rule->tuples.dst_ip[3] =
5030 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5031 rule->tuples_mask.dst_ip[3] =
5032 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5034 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5035 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5037 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5038 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5040 rule->tuples.ether_proto = ETH_P_IP;
5041 rule->tuples_mask.ether_proto = 0xFFFF;
5047 be32_to_cpu_array(rule->tuples.src_ip,
5048 fs->h_u.tcp_ip6_spec.ip6src, 4);
5049 be32_to_cpu_array(rule->tuples_mask.src_ip,
5050 fs->m_u.tcp_ip6_spec.ip6src, 4);
5052 be32_to_cpu_array(rule->tuples.dst_ip,
5053 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5054 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5055 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5057 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5058 rule->tuples_mask.src_port =
5059 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5061 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5062 rule->tuples_mask.dst_port =
5063 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5065 rule->tuples.ether_proto = ETH_P_IPV6;
5066 rule->tuples_mask.ether_proto = 0xFFFF;
5069 case IPV6_USER_FLOW:
5070 be32_to_cpu_array(rule->tuples.src_ip,
5071 fs->h_u.usr_ip6_spec.ip6src, 4);
5072 be32_to_cpu_array(rule->tuples_mask.src_ip,
5073 fs->m_u.usr_ip6_spec.ip6src, 4);
5075 be32_to_cpu_array(rule->tuples.dst_ip,
5076 fs->h_u.usr_ip6_spec.ip6dst, 4);
5077 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5078 fs->m_u.usr_ip6_spec.ip6dst, 4);
5080 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5081 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5083 rule->tuples.ether_proto = ETH_P_IPV6;
5084 rule->tuples_mask.ether_proto = 0xFFFF;
5088 ether_addr_copy(rule->tuples.src_mac,
5089 fs->h_u.ether_spec.h_source);
5090 ether_addr_copy(rule->tuples_mask.src_mac,
5091 fs->m_u.ether_spec.h_source);
5093 ether_addr_copy(rule->tuples.dst_mac,
5094 fs->h_u.ether_spec.h_dest);
5095 ether_addr_copy(rule->tuples_mask.dst_mac,
5096 fs->m_u.ether_spec.h_dest);
5098 rule->tuples.ether_proto =
5099 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5100 rule->tuples_mask.ether_proto =
5101 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5108 switch (flow_type) {
5111 rule->tuples.ip_proto = IPPROTO_SCTP;
5112 rule->tuples_mask.ip_proto = 0xFF;
5116 rule->tuples.ip_proto = IPPROTO_TCP;
5117 rule->tuples_mask.ip_proto = 0xFF;
5121 rule->tuples.ip_proto = IPPROTO_UDP;
5122 rule->tuples_mask.ip_proto = 0xFF;
5128 if (fs->flow_type & FLOW_EXT) {
5129 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5130 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5133 if (fs->flow_type & FLOW_MAC_EXT) {
5134 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5135 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5141 /* must be called with fd_rule_lock held */
5142 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5143 struct hclge_fd_rule *rule)
5148 dev_err(&hdev->pdev->dev,
5149 "The flow director rule is NULL\n");
5153 /* this never fails here, so there is no need to check the return value */
5154 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5156 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5160 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5167 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5171 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5172 struct ethtool_rxnfc *cmd)
5174 struct hclge_vport *vport = hclge_get_vport(handle);
5175 struct hclge_dev *hdev = vport->back;
5176 u16 dst_vport_id = 0, q_index = 0;
5177 struct ethtool_rx_flow_spec *fs;
5178 struct hclge_fd_rule *rule;
5183 if (!hnae3_dev_fd_supported(hdev))
5187 dev_warn(&hdev->pdev->dev,
5188 "Please enable flow director first\n");
5192 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5194 ret = hclge_fd_check_spec(hdev, fs, &unused);
5196 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5200 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5201 action = HCLGE_FD_ACTION_DROP_PACKET;
5203 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5204 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5207 if (vf > hdev->num_req_vfs) {
5208 dev_err(&hdev->pdev->dev,
5209 "Error: vf id (%d) > max vf num (%d)\n",
5210 vf, hdev->num_req_vfs);
5214 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5215 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5218 dev_err(&hdev->pdev->dev,
5219 "Error: queue id (%d) > max tqp num (%d)\n",
5224 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5228 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5232 ret = hclge_fd_get_tuple(hdev, fs, rule);
5238 rule->flow_type = fs->flow_type;
5240 rule->location = fs->location;
5241 rule->unused_tuple = unused;
5242 rule->vf_id = dst_vport_id;
5243 rule->queue_id = q_index;
5244 rule->action = action;
5245 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5247 /* to avoid rule conflicts, when the user configures rules via ethtool,
5248 * we need to clear all arfs rules
5249 */
5250 hclge_clear_arfs_rules(handle);
5252 spin_lock_bh(&hdev->fd_rule_lock);
5253 ret = hclge_fd_config_rule(hdev, rule);
5255 spin_unlock_bh(&hdev->fd_rule_lock);
5260 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5261 struct ethtool_rxnfc *cmd)
5263 struct hclge_vport *vport = hclge_get_vport(handle);
5264 struct hclge_dev *hdev = vport->back;
5265 struct ethtool_rx_flow_spec *fs;
5268 if (!hnae3_dev_fd_supported(hdev))
5271 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5273 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5276 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5277 dev_err(&hdev->pdev->dev,
5278 "Delete fail, rule %d is inexistent\n",
5283 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5284 fs->location, NULL, false);
5288 spin_lock_bh(&hdev->fd_rule_lock);
5289 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5291 spin_unlock_bh(&hdev->fd_rule_lock);
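
/* The matching user-space operation, with the standard ethtool syntax
 * (device name is illustrative), deletes the rule at a given location:
 *
 *	ethtool -N eth0 delete 1
 */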
5296 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5299 struct hclge_vport *vport = hclge_get_vport(handle);
5300 struct hclge_dev *hdev = vport->back;
5301 struct hclge_fd_rule *rule;
5302 struct hlist_node *node;
5305 if (!hnae3_dev_fd_supported(hdev))
5308 spin_lock_bh(&hdev->fd_rule_lock);
5309 for_each_set_bit(location, hdev->fd_bmap,
5310 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5311 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5315 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5317 hlist_del(&rule->rule_node);
5320 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5321 hdev->hclge_fd_rule_num = 0;
5322 bitmap_zero(hdev->fd_bmap,
5323 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5326 spin_unlock_bh(&hdev->fd_rule_lock);
5329 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5331 struct hclge_vport *vport = hclge_get_vport(handle);
5332 struct hclge_dev *hdev = vport->back;
5333 struct hclge_fd_rule *rule;
5334 struct hlist_node *node;
5337 /* Return ok here, because reset error handling will check this
5338 * return value. If error is returned here, the reset process will
5339 * fail.
5340 */
5341 if (!hnae3_dev_fd_supported(hdev))
5344 /* if fd is disabled, it should not be restored during reset */
5348 spin_lock_bh(&hdev->fd_rule_lock);
5349 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5350 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5352 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5355 dev_warn(&hdev->pdev->dev,
5356 "Restore rule %d failed, remove it\n",
5358 clear_bit(rule->location, hdev->fd_bmap);
5359 hlist_del(&rule->rule_node);
5361 hdev->hclge_fd_rule_num--;
5365 if (hdev->hclge_fd_rule_num)
5366 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5368 spin_unlock_bh(&hdev->fd_rule_lock);
5373 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5374 struct ethtool_rxnfc *cmd)
5376 struct hclge_vport *vport = hclge_get_vport(handle);
5377 struct hclge_dev *hdev = vport->back;
5379 if (!hnae3_dev_fd_supported(hdev))
5382 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5383 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5388 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5389 struct ethtool_rxnfc *cmd)
5391 struct hclge_vport *vport = hclge_get_vport(handle);
5392 struct hclge_fd_rule *rule = NULL;
5393 struct hclge_dev *hdev = vport->back;
5394 struct ethtool_rx_flow_spec *fs;
5395 struct hlist_node *node2;
5397 if (!hnae3_dev_fd_supported(hdev))
5400 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5402 spin_lock_bh(&hdev->fd_rule_lock);
5404 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5405 if (rule->location >= fs->location)
5409 if (!rule || fs->location != rule->location) {
5410 spin_unlock_bh(&hdev->fd_rule_lock);
5415 fs->flow_type = rule->flow_type;
5416 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5420 fs->h_u.tcp_ip4_spec.ip4src =
5421 cpu_to_be32(rule->tuples.src_ip[3]);
5422 fs->m_u.tcp_ip4_spec.ip4src =
5423 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5424 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5426 fs->h_u.tcp_ip4_spec.ip4dst =
5427 cpu_to_be32(rule->tuples.dst_ip[3]);
5428 fs->m_u.tcp_ip4_spec.ip4dst =
5429 rule->unused_tuple & BIT(INNER_DST_IP) ?
5430 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5432 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5433 fs->m_u.tcp_ip4_spec.psrc =
5434 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5435 0 : cpu_to_be16(rule->tuples_mask.src_port);
5437 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5438 fs->m_u.tcp_ip4_spec.pdst =
5439 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5440 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5442 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5443 fs->m_u.tcp_ip4_spec.tos =
5444 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5445 0 : rule->tuples_mask.ip_tos;
5449 fs->h_u.usr_ip4_spec.ip4src =
5450 cpu_to_be32(rule->tuples.src_ip[3]);
5451 fs->m_u.usr_ip4_spec.ip4src =
5452 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5453 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5455 fs->h_u.usr_ip4_spec.ip4dst =
5456 cpu_to_be32(rule->tuples.dst_ip[3]);
5457 fs->m_u.usr_ip4_spec.ip4dst =
5458 rule->unused_tuple & BIT(INNER_DST_IP) ?
5459 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5461 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5462 fs->m_u.usr_ip4_spec.tos =
5463 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5464 0 : rule->tuples_mask.ip_tos;
5466 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5467 fs->m_u.usr_ip4_spec.proto =
5468 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5469 0 : rule->tuples_mask.ip_proto;
5471 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5477 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5478 rule->tuples.src_ip, 4);
5479 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5480 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5482 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5483 rule->tuples_mask.src_ip, 4);
5485 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5486 rule->tuples.dst_ip, 4);
5487 if (rule->unused_tuple & BIT(INNER_DST_IP))
5488 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5490 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5491 rule->tuples_mask.dst_ip, 4);
5493 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5494 fs->m_u.tcp_ip6_spec.psrc =
5495 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5496 0 : cpu_to_be16(rule->tuples_mask.src_port);
5498 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5499 fs->m_u.tcp_ip6_spec.pdst =
5500 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5501 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5504 case IPV6_USER_FLOW:
5505 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5506 rule->tuples.src_ip, 4);
5507 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5508 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5510 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5511 rule->tuples_mask.src_ip, 4);
5513 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5514 rule->tuples.dst_ip, 4);
5515 if (rule->unused_tuple & BIT(INNER_DST_IP))
5516 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5518 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5519 rule->tuples_mask.dst_ip, 4);
5521 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5522 fs->m_u.usr_ip6_spec.l4_proto =
5523 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5524 0 : rule->tuples_mask.ip_proto;
5528 ether_addr_copy(fs->h_u.ether_spec.h_source,
5529 rule->tuples.src_mac);
5530 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5531 eth_zero_addr(fs->m_u.ether_spec.h_source);
5533 ether_addr_copy(fs->m_u.ether_spec.h_source,
5534 rule->tuples_mask.src_mac);
5536 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5537 rule->tuples.dst_mac);
5538 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5539 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5541 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5542 rule->tuples_mask.dst_mac);
5544 fs->h_u.ether_spec.h_proto =
5545 cpu_to_be16(rule->tuples.ether_proto);
5546 fs->m_u.ether_spec.h_proto =
5547 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5548 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5552 spin_unlock_bh(&hdev->fd_rule_lock);
5556 if (fs->flow_type & FLOW_EXT) {
5557 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5558 fs->m_ext.vlan_tci =
5559 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5560 cpu_to_be16(VLAN_VID_MASK) :
5561 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5564 if (fs->flow_type & FLOW_MAC_EXT) {
5565 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5566 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5567 eth_zero_addr(fs->m_ext.h_dest);
5569 ether_addr_copy(fs->m_ext.h_dest,
5570 rule->tuples_mask.dst_mac);
5573 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5574 fs->ring_cookie = RX_CLS_FLOW_DISC;
5578 fs->ring_cookie = rule->queue_id;
5579 vf_id = rule->vf_id;
5580 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5581 fs->ring_cookie |= vf_id;
5584 spin_unlock_bh(&hdev->fd_rule_lock);
5589 static int hclge_get_all_rules(struct hnae3_handle *handle,
5590 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5592 struct hclge_vport *vport = hclge_get_vport(handle);
5593 struct hclge_dev *hdev = vport->back;
5594 struct hclge_fd_rule *rule;
5595 struct hlist_node *node2;
5598 if (!hnae3_dev_fd_supported(hdev))
5601 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5603 spin_lock_bh(&hdev->fd_rule_lock);
5604 hlist_for_each_entry_safe(rule, node2,
5605 &hdev->fd_rule_list, rule_node) {
5606 if (cnt == cmd->rule_cnt) {
5607 spin_unlock_bh(&hdev->fd_rule_lock);
5611 rule_locs[cnt] = rule->location;
5615 spin_unlock_bh(&hdev->fd_rule_lock);
5617 cmd->rule_cnt = cnt;
5622 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5623 struct hclge_fd_rule_tuples *tuples)
5625 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5626 tuples->ip_proto = fkeys->basic.ip_proto;
5627 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5629 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5630 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5631 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5633 memcpy(tuples->src_ip,
5634 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5635 sizeof(tuples->src_ip));
5636 memcpy(tuples->dst_ip,
5637 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5638 sizeof(tuples->dst_ip));
5642 /* traverse all rules, check whether an existing rule has the same tuples */
5643 static struct hclge_fd_rule *
5644 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5645 const struct hclge_fd_rule_tuples *tuples)
5647 struct hclge_fd_rule *rule = NULL;
5648 struct hlist_node *node;
5650 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5651 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5658 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5659 struct hclge_fd_rule *rule)
5661 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5662 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5663 BIT(INNER_SRC_PORT);
5666 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5667 if (tuples->ether_proto == ETH_P_IP) {
5668 if (tuples->ip_proto == IPPROTO_TCP)
5669 rule->flow_type = TCP_V4_FLOW;
5671 rule->flow_type = UDP_V4_FLOW;
5673 if (tuples->ip_proto == IPPROTO_TCP)
5674 rule->flow_type = TCP_V6_FLOW;
5676 rule->flow_type = UDP_V6_FLOW;
5678 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5679 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5682 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5683 u16 flow_id, struct flow_keys *fkeys)
5685 struct hclge_vport *vport = hclge_get_vport(handle);
5686 struct hclge_fd_rule_tuples new_tuples;
5687 struct hclge_dev *hdev = vport->back;
5688 struct hclge_fd_rule *rule;
5693 if (!hnae3_dev_fd_supported(hdev))
5696 memset(&new_tuples, 0, sizeof(new_tuples));
5697 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5699 spin_lock_bh(&hdev->fd_rule_lock);
5701 /* when there is already an fd rule added by the user,
5702 * arfs should not work
5703 */
5704 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5705 spin_unlock_bh(&hdev->fd_rule_lock);
5710 /* check whether a flow director filter exists for this flow;
5711 * if not, create a new filter for it;
5712 * if a filter exists with a different queue id, modify the filter;
5713 * if a filter exists with the same queue id, do nothing
5714 */
5715 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5717 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5718 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5719 spin_unlock_bh(&hdev->fd_rule_lock);
5724 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5726 spin_unlock_bh(&hdev->fd_rule_lock);
5731 set_bit(bit_id, hdev->fd_bmap);
5732 rule->location = bit_id;
5733 rule->flow_id = flow_id;
5734 rule->queue_id = queue_id;
5735 hclge_fd_build_arfs_rule(&new_tuples, rule);
5736 ret = hclge_fd_config_rule(hdev, rule);
5738 spin_unlock_bh(&hdev->fd_rule_lock);
5743 return rule->location;
5746 spin_unlock_bh(&hdev->fd_rule_lock);
5748 if (rule->queue_id == queue_id)
5749 return rule->location;
5751 tmp_queue_id = rule->queue_id;
5752 rule->queue_id = queue_id;
5753 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5755 rule->queue_id = tmp_queue_id;
5759 return rule->location;
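
/* Assumed call path (sketch): the stack reaches this function through the
 * aRFS hook, e.g. an .ndo_rx_flow_steer implementation along the lines of:
 *
 *	struct flow_keys fkeys;
 *
 *	if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
 *		return -EPROTONOSUPPORT;
 *	return hclge_add_fd_entry_by_arfs(handle, rxq_index, flow_id, &fkeys);
 *
 * The returned rule->location then serves as the filter id that
 * hclge_rfs_filter_expire() below passes to rps_may_expire_flow().
 */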
5762 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5764 #ifdef CONFIG_RFS_ACCEL
5765 struct hnae3_handle *handle = &hdev->vport[0].nic;
5766 struct hclge_fd_rule *rule;
5767 struct hlist_node *node;
5768 HLIST_HEAD(del_list);
5770 spin_lock_bh(&hdev->fd_rule_lock);
5771 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5772 spin_unlock_bh(&hdev->fd_rule_lock);
5775 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5776 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5777 rule->flow_id, rule->location)) {
5778 hlist_del_init(&rule->rule_node);
5779 hlist_add_head(&rule->rule_node, &del_list);
5780 hdev->hclge_fd_rule_num--;
5781 clear_bit(rule->location, hdev->fd_bmap);
5784 spin_unlock_bh(&hdev->fd_rule_lock);
5786 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5787 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5788 rule->location, NULL, false);
5794 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5796 #ifdef CONFIG_RFS_ACCEL
5797 struct hclge_vport *vport = hclge_get_vport(handle);
5798 struct hclge_dev *hdev = vport->back;
5800 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5801 hclge_del_all_fd_entries(handle, true);
5805 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5807 struct hclge_vport *vport = hclge_get_vport(handle);
5808 struct hclge_dev *hdev = vport->back;
5810 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5811 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5814 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5816 struct hclge_vport *vport = hclge_get_vport(handle);
5817 struct hclge_dev *hdev = vport->back;
5819 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5822 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5824 struct hclge_vport *vport = hclge_get_vport(handle);
5825 struct hclge_dev *hdev = vport->back;
5827 return hdev->rst_stats.hw_reset_done_cnt;
5830 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5832 struct hclge_vport *vport = hclge_get_vport(handle);
5833 struct hclge_dev *hdev = vport->back;
5836 hdev->fd_en = enable;
5837 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5839 hclge_del_all_fd_entries(handle, clear);
5841 hclge_restore_fd_entries(handle);
5844 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5846 struct hclge_desc desc;
5847 struct hclge_config_mac_mode_cmd *req =
5848 (struct hclge_config_mac_mode_cmd *)desc.data;
5852 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5853 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5854 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5855 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5856 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5857 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5858 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5859 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5860 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5861 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5862 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5863 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5864 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5865 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5866 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5867 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5869 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5871 dev_err(&hdev->pdev->dev,
5872 "mac enable fail, ret =%d.\n", ret);
5875 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5877 struct hclge_config_mac_mode_cmd *req;
5878 struct hclge_desc desc;
5882 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5883 /* 1 Read out the MAC mode config at first */
5884 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5887 dev_err(&hdev->pdev->dev,
5888 "mac loopback get fail, ret =%d.\n", ret);
5892 /* 2 Then setup the loopback flag */
5893 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5894 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5895 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5896 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5898 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5900 /* 3 Config mac work mode with loopback flag
5901 * and its original configuration parameters
5902 */
5903 hclge_cmd_reuse_desc(&desc, false);
5904 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5906 dev_err(&hdev->pdev->dev,
5907 "mac loopback set fail, ret =%d.\n", ret);
5911 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5912 enum hnae3_loop loop_mode)
5914 #define HCLGE_SERDES_RETRY_MS 10
5915 #define HCLGE_SERDES_RETRY_NUM 100
5917 #define HCLGE_MAC_LINK_STATUS_MS 10
5918 #define HCLGE_MAC_LINK_STATUS_NUM 100
5919 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5920 #define HCLGE_MAC_LINK_STATUS_UP 1
5922 struct hclge_serdes_lb_cmd *req;
5923 struct hclge_desc desc;
5924 int mac_link_ret = 0;
5928 req = (struct hclge_serdes_lb_cmd *)desc.data;
5929 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5931 switch (loop_mode) {
5932 case HNAE3_LOOP_SERIAL_SERDES:
5933 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5935 case HNAE3_LOOP_PARALLEL_SERDES:
5936 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5939 dev_err(&hdev->pdev->dev,
5940 "unsupported serdes loopback mode %d\n", loop_mode);
5945 req->enable = loop_mode_b;
5946 req->mask = loop_mode_b;
5947 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5949 req->mask = loop_mode_b;
5950 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5953 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5955 dev_err(&hdev->pdev->dev,
5956 "serdes loopback set fail, ret = %d\n", ret);
5961 msleep(HCLGE_SERDES_RETRY_MS);
5962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5964 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5966 dev_err(&hdev->pdev->dev,
5967 "serdes loopback get, ret = %d\n", ret);
5970 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5971 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5973 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5974 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5976 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5977 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5981 hclge_cfg_mac_mode(hdev, en);
5985 /* serdes internal loopback, independent of the network cable. */
5986 msleep(HCLGE_MAC_LINK_STATUS_MS);
5987 ret = hclge_get_mac_link_status(hdev);
5988 if (ret == mac_link_ret)
5990 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5992 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5997 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5998 int stream_id, bool enable)
6000 struct hclge_desc desc;
6001 struct hclge_cfg_com_tqp_queue_cmd *req =
6002 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6005 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6006 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6007 req->stream_id = cpu_to_le16(stream_id);
6008 req->enable |= enable << HCLGE_TQP_ENABLE_B;
6010 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6012 dev_err(&hdev->pdev->dev,
6013 "Tqp enable fail, status =%d.\n", ret);
6017 static int hclge_set_loopback(struct hnae3_handle *handle,
6018 enum hnae3_loop loop_mode, bool en)
6020 struct hclge_vport *vport = hclge_get_vport(handle);
6021 struct hnae3_knic_private_info *kinfo;
6022 struct hclge_dev *hdev = vport->back;
6025 switch (loop_mode) {
6026 case HNAE3_LOOP_APP:
6027 ret = hclge_set_app_loopback(hdev, en);
6029 case HNAE3_LOOP_SERIAL_SERDES:
6030 case HNAE3_LOOP_PARALLEL_SERDES:
6031 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6035 dev_err(&hdev->pdev->dev,
6036 "loop_mode %d is not supported\n", loop_mode);
6043 kinfo = &vport->nic.kinfo;
6044 for (i = 0; i < kinfo->num_tqps; i++) {
6045 ret = hclge_tqp_enable(hdev, i, 0, en);
6053 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6055 struct hclge_vport *vport = hclge_get_vport(handle);
6056 struct hnae3_knic_private_info *kinfo;
6057 struct hnae3_queue *queue;
6058 struct hclge_tqp *tqp;
6061 kinfo = &vport->nic.kinfo;
6062 for (i = 0; i < kinfo->num_tqps; i++) {
6063 queue = handle->kinfo.tqp[i];
6064 tqp = container_of(queue, struct hclge_tqp, q);
6065 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6069 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6071 struct hclge_vport *vport = hclge_get_vport(handle);
6072 struct hclge_dev *hdev = vport->back;
6075 mod_timer(&hdev->service_timer, jiffies + HZ);
6077 del_timer_sync(&hdev->service_timer);
6078 cancel_work_sync(&hdev->service_task);
6079 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6083 static int hclge_ae_start(struct hnae3_handle *handle)
6085 struct hclge_vport *vport = hclge_get_vport(handle);
6086 struct hclge_dev *hdev = vport->back;
6089 hclge_cfg_mac_mode(hdev, true);
6090 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6091 hdev->hw.mac.link = 0;
6093 /* reset tqp stats */
6094 hclge_reset_tqp_stats(handle);
6096 hclge_mac_start_phy(hdev);
6101 static void hclge_ae_stop(struct hnae3_handle *handle)
6103 struct hclge_vport *vport = hclge_get_vport(handle);
6104 struct hclge_dev *hdev = vport->back;
6107 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6109 hclge_clear_arfs_rules(handle);
6111 /* If it is not a PF reset, the firmware will disable the MAC,
6112 * so it only needs to stop the phy here.
6113 */
6114 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6115 hdev->reset_type != HNAE3_FUNC_RESET) {
6116 hclge_mac_stop_phy(hdev);
6120 for (i = 0; i < handle->kinfo.num_tqps; i++)
6121 hclge_reset_tqp(handle, i);
6124 hclge_cfg_mac_mode(hdev, false);
6126 hclge_mac_stop_phy(hdev);
6128 /* reset tqp stats */
6129 hclge_reset_tqp_stats(handle);
6130 hclge_update_link_status(hdev);
6133 int hclge_vport_start(struct hclge_vport *vport)
6135 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6136 vport->last_active_jiffies = jiffies;
6140 void hclge_vport_stop(struct hclge_vport *vport)
6142 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6145 static int hclge_client_start(struct hnae3_handle *handle)
6147 struct hclge_vport *vport = hclge_get_vport(handle);
6149 return hclge_vport_start(vport);
6152 static void hclge_client_stop(struct hnae3_handle *handle)
6154 struct hclge_vport *vport = hclge_get_vport(handle);
6156 hclge_vport_stop(vport);
6159 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6160 u16 cmdq_resp, u8 resp_code,
6161 enum hclge_mac_vlan_tbl_opcode op)
6163 struct hclge_dev *hdev = vport->back;
6164 int return_status = -EIO;
6167 dev_err(&hdev->pdev->dev,
6168 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6173 if (op == HCLGE_MAC_VLAN_ADD) {
6174 if ((!resp_code) || (resp_code == 1)) {
6176 } else if (resp_code == 2) {
6177 return_status = -ENOSPC;
6178 dev_err(&hdev->pdev->dev,
6179 "add mac addr failed for uc_overflow.\n");
6180 } else if (resp_code == 3) {
6181 return_status = -ENOSPC;
6182 dev_err(&hdev->pdev->dev,
6183 "add mac addr failed for mc_overflow.\n");
6185 dev_err(&hdev->pdev->dev,
6186 "add mac addr failed for undefined, code=%d.\n",
6189 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6192 } else if (resp_code == 1) {
6193 return_status = -ENOENT;
6194 dev_dbg(&hdev->pdev->dev,
6195 "remove mac addr failed for miss.\n");
6197 dev_err(&hdev->pdev->dev,
6198 "remove mac addr failed for undefined, code=%d.\n",
6201 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6204 } else if (resp_code == 1) {
6205 return_status = -ENOENT;
6206 dev_dbg(&hdev->pdev->dev,
6207 "lookup mac addr failed for miss.\n");
6209 dev_err(&hdev->pdev->dev,
6210 "lookup mac addr failed for undefined, code=%d.\n",
6214 return_status = -EINVAL;
6215 dev_err(&hdev->pdev->dev,
6216 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6220 return return_status;
6223 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6228 if (vfid > 255 || vfid < 0)
6231 if (vfid >= 0 && vfid <= 191) {
6232 word_num = vfid / 32;
6233 bit_num = vfid % 32;
6235 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6237 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6239 word_num = (vfid - 192) / 32;
6240 bit_num = vfid % 32;
6242 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6244 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
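
/* Worked example: vfid 100 selects desc[1] with word_num = 100 / 32 = 3
 * and bit_num = 100 % 32 = 4; vfid 200 selects desc[2] with word_num =
 * (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8. Every function id thus
 * owns exactly one bit across the three descriptors.
 */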
6250 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6252 #define HCLGE_DESC_NUMBER 3
6253 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6256 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6257 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6258 if (desc[i].data[j])
6264 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6265 const u8 *addr, bool is_mc)
6267 const unsigned char *mac_addr = addr;
6268 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6269 (mac_addr[0]) | (mac_addr[1] << 8);
6270 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6272 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6274 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6275 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6278 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6279 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
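
/* Worked example of the packing above: for MAC 00:11:22:33:44:55,
 * high_val = 0x33221100 (bytes 3..0) and low_val = 0x5544 (bytes 5..4),
 * i.e. the address is stored byte-reversed across the two fields.
 */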
6282 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6283 struct hclge_mac_vlan_tbl_entry_cmd *req)
6285 struct hclge_dev *hdev = vport->back;
6286 struct hclge_desc desc;
6291 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6293 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6295 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6297 dev_err(&hdev->pdev->dev,
6298 "del mac addr failed for cmd_send, ret =%d.\n",
6302 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6303 retval = le16_to_cpu(desc.retval);
6305 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6306 HCLGE_MAC_VLAN_REMOVE);
6309 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6310 struct hclge_mac_vlan_tbl_entry_cmd *req,
6311 struct hclge_desc *desc,
6314 struct hclge_dev *hdev = vport->back;
6319 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6321 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6322 memcpy(desc[0].data,
6324 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6325 hclge_cmd_setup_basic_desc(&desc[1],
6326 HCLGE_OPC_MAC_VLAN_ADD,
6328 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6329 hclge_cmd_setup_basic_desc(&desc[2],
6330 HCLGE_OPC_MAC_VLAN_ADD,
6332 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6334 memcpy(desc[0].data,
6336 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6337 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6340 dev_err(&hdev->pdev->dev,
6341 "lookup mac addr failed for cmd_send, ret =%d.\n",
6345 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6346 retval = le16_to_cpu(desc[0].retval);
6348 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6349 HCLGE_MAC_VLAN_LKUP);
6352 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6353 struct hclge_mac_vlan_tbl_entry_cmd *req,
6354 struct hclge_desc *mc_desc)
6356 struct hclge_dev *hdev = vport->back;
6363 struct hclge_desc desc;
6365 hclge_cmd_setup_basic_desc(&desc,
6366 HCLGE_OPC_MAC_VLAN_ADD,
6368 memcpy(desc.data, req,
6369 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6370 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6371 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6372 retval = le16_to_cpu(desc.retval);
6374 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6376 HCLGE_MAC_VLAN_ADD);
6378 hclge_cmd_reuse_desc(&mc_desc[0], false);
6379 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6380 hclge_cmd_reuse_desc(&mc_desc[1], false);
6381 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6382 hclge_cmd_reuse_desc(&mc_desc[2], false);
6383 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6384 memcpy(mc_desc[0].data, req,
6385 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6386 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6387 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6388 retval = le16_to_cpu(mc_desc[0].retval);
6390 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6392 HCLGE_MAC_VLAN_ADD);
6396 dev_err(&hdev->pdev->dev,
6397 "add mac addr failed for cmd_send, ret =%d.\n",
6405 static int hclge_init_umv_space(struct hclge_dev *hdev)
6407 u16 allocated_size = 0;
6410 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6415 if (allocated_size < hdev->wanted_umv_size)
6416 dev_warn(&hdev->pdev->dev,
6417 "Alloc umv space failed, want %d, get %d\n",
6418 hdev->wanted_umv_size, allocated_size);
6420 mutex_init(&hdev->umv_mutex);
6421 hdev->max_umv_size = allocated_size;
6422 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6423 hdev->share_umv_size = hdev->priv_umv_size +
6424 hdev->max_umv_size % (hdev->num_req_vfs + 2);
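
/* Worked example of the split above: with max_umv_size = 256 and
 * num_req_vfs = 6, the table divides into 6 + 2 = 8 parts of 32 entries.
 * The PF and each VF get priv_umv_size = 32 private entries; the eighth
 * part plus the remainder forms share_umv_size = 32 + 0 = 32 shared
 * entries, which hclge_update_umv_space() consumes only once a function
 * has exhausted its private quota.
 */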
6429 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6433 if (hdev->max_umv_size > 0) {
6434 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6438 hdev->max_umv_size = 0;
6440 mutex_destroy(&hdev->umv_mutex);
6445 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6446 u16 *allocated_size, bool is_alloc)
6448 struct hclge_umv_spc_alc_cmd *req;
6449 struct hclge_desc desc;
6452 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6453 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6454 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6455 req->space_size = cpu_to_le32(space_size);
6457 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6459 dev_err(&hdev->pdev->dev,
6460 "%s umv space failed for cmd_send, ret =%d\n",
6461 is_alloc ? "allocate" : "free", ret);
6465 if (is_alloc && allocated_size)
6466 *allocated_size = le32_to_cpu(desc.data[1]);
6471 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6473 struct hclge_vport *vport;
6476 for (i = 0; i < hdev->num_alloc_vport; i++) {
6477 vport = &hdev->vport[i];
6478 vport->used_umv_num = 0;
6481 mutex_lock(&hdev->umv_mutex);
6482 hdev->share_umv_size = hdev->priv_umv_size +
6483 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6484 mutex_unlock(&hdev->umv_mutex);
6487 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6489 struct hclge_dev *hdev = vport->back;
6492 mutex_lock(&hdev->umv_mutex);
6493 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6494 hdev->share_umv_size == 0);
6495 mutex_unlock(&hdev->umv_mutex);
6500 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6502 struct hclge_dev *hdev = vport->back;
6504 mutex_lock(&hdev->umv_mutex);
6506 if (vport->used_umv_num > hdev->priv_umv_size)
6507 hdev->share_umv_size++;
6509 if (vport->used_umv_num > 0)
6510 vport->used_umv_num--;
6512 if (vport->used_umv_num >= hdev->priv_umv_size &&
6513 hdev->share_umv_size > 0)
6514 hdev->share_umv_size--;
6515 vport->used_umv_num++;
6517 mutex_unlock(&hdev->umv_mutex);
6520 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6521 const unsigned char *addr)
6523 struct hclge_vport *vport = hclge_get_vport(handle);
6525 return hclge_add_uc_addr_common(vport, addr);
6528 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6529 const unsigned char *addr)
6531 struct hclge_dev *hdev = vport->back;
6532 struct hclge_mac_vlan_tbl_entry_cmd req;
6533 struct hclge_desc desc;
6534 u16 egress_port = 0;
6537 /* mac addr check */
6538 if (is_zero_ether_addr(addr) ||
6539 is_broadcast_ether_addr(addr) ||
6540 is_multicast_ether_addr(addr)) {
6541 dev_err(&hdev->pdev->dev,
6542 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6544 is_zero_ether_addr(addr),
6545 is_broadcast_ether_addr(addr),
6546 is_multicast_ether_addr(addr));
6550 memset(&req, 0, sizeof(req));
6552 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6553 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6555 req.egress_port = cpu_to_le16(egress_port);
6557 hclge_prepare_mac_addr(&req, addr, false);
6559 /* Look up the mac address in the mac_vlan table, and add
6560 * it if the entry does not exist. Duplicate unicast entries
6561 * are not allowed in the mac_vlan table.
6562 */
6563 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6564 if (ret == -ENOENT) {
6565 if (!hclge_is_umv_space_full(vport)) {
6566 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6568 hclge_update_umv_space(vport, false);
6572 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6573 hdev->priv_umv_size);
6578 /* check if we just hit the duplicate */
6580 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6581 vport->vport_id, addr);
6585 dev_err(&hdev->pdev->dev,
6586 "PF failed to add unicast entry(%pM) in the MAC table\n",
6592 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6593 const unsigned char *addr)
6595 struct hclge_vport *vport = hclge_get_vport(handle);
6597 return hclge_rm_uc_addr_common(vport, addr);
6600 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6601 const unsigned char *addr)
6603 struct hclge_dev *hdev = vport->back;
6604 struct hclge_mac_vlan_tbl_entry_cmd req;
6607 /* mac addr check */
6608 if (is_zero_ether_addr(addr) ||
6609 is_broadcast_ether_addr(addr) ||
6610 is_multicast_ether_addr(addr)) {
6611 dev_dbg(&hdev->pdev->dev,
6612 "Remove mac err! invalid mac:%pM.\n",
6617 memset(&req, 0, sizeof(req));
6618 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6619 hclge_prepare_mac_addr(&req, addr, false);
6620 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6622 hclge_update_umv_space(vport, true);
6627 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6628 const unsigned char *addr)
6630 struct hclge_vport *vport = hclge_get_vport(handle);
6632 return hclge_add_mc_addr_common(vport, addr);
6635 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6636 const unsigned char *addr)
6638 struct hclge_dev *hdev = vport->back;
6639 struct hclge_mac_vlan_tbl_entry_cmd req;
6640 struct hclge_desc desc[3];
6643 /* mac addr check */
6644 if (!is_multicast_ether_addr(addr)) {
6645 dev_err(&hdev->pdev->dev,
6646 "Add mc mac err! invalid mac:%pM.\n",
6650 memset(&req, 0, sizeof(req));
6651 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6652 hclge_prepare_mac_addr(&req, addr, true);
6653 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6655 /* This mac addr exists, update the VFID for it */
6656 hclge_update_desc_vfid(desc, vport->vport_id, false);
6657 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6659 /* This mac addr does not exist, add a new entry for it */
6660 memset(desc[0].data, 0, sizeof(desc[0].data));
6661 memset(desc[1].data, 0, sizeof(desc[0].data));
6662 memset(desc[2].data, 0, sizeof(desc[0].data));
6663 hclge_update_desc_vfid(desc, vport->vport_id, false);
6664 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6667 if (status == -ENOSPC)
6668 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6673 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6674 const unsigned char *addr)
6676 struct hclge_vport *vport = hclge_get_vport(handle);
6678 return hclge_rm_mc_addr_common(vport, addr);
6681 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6682 const unsigned char *addr)
6684 struct hclge_dev *hdev = vport->back;
6685 struct hclge_mac_vlan_tbl_entry_cmd req;
6686 enum hclge_cmd_status status;
6687 struct hclge_desc desc[3];
6689 /* mac addr check */
6690 if (!is_multicast_ether_addr(addr)) {
6691 dev_dbg(&hdev->pdev->dev,
6692 "Remove mc mac err! invalid mac:%pM.\n",
6697 memset(&req, 0, sizeof(req));
6698 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6699 hclge_prepare_mac_addr(&req, addr, true);
6700 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6702 /* This mac addr exists, remove this handle's VFID for it */
6703 hclge_update_desc_vfid(desc, vport->vport_id, true);
6705 if (hclge_is_all_function_id_zero(desc))
6706 /* All the vfids are zero, so delete this entry */
6707 status = hclge_remove_mac_vlan_tbl(vport, &req);
6708 else
6709 /* Not all the vfids are zero, so update the vfid */
6710 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6713 /* Maybe this mac address is in mta table, but it cannot be
6714 * deleted here because an entry of mta represents an address
6715 * range rather than a specific address. The delete action to
6716 * all entries will take effect in update_mta_status called by
6717 * hns3_nic_set_rx_mode.
6718 */
6725 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6726 enum HCLGE_MAC_ADDR_TYPE mac_type)
6728 struct hclge_vport_mac_addr_cfg *mac_cfg;
6729 struct list_head *list;
6731 if (!vport->vport_id)
6734 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6738 mac_cfg->hd_tbl_status = true;
6739 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6741 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6742 &vport->uc_mac_list : &vport->mc_mac_list;
6744 list_add_tail(&mac_cfg->node, list);
6747 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6749 enum HCLGE_MAC_ADDR_TYPE mac_type)
6751 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6752 struct list_head *list;
6753 bool uc_flag, mc_flag;
6755 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6756 &vport->uc_mac_list : &vport->mc_mac_list;
6758 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6759 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6761 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6762 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6763 if (uc_flag && mac_cfg->hd_tbl_status)
6764 hclge_rm_uc_addr_common(vport, mac_addr);
6766 if (mc_flag && mac_cfg->hd_tbl_status)
6767 hclge_rm_mc_addr_common(vport, mac_addr);
6769 list_del(&mac_cfg->node);
6776 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6777 enum HCLGE_MAC_ADDR_TYPE mac_type)
6779 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6780 struct list_head *list;
6782 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6783 &vport->uc_mac_list : &vport->mc_mac_list;
6785 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6786 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6787 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6789 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6790 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6792 mac_cfg->hd_tbl_status = false;
6794 list_del(&mac_cfg->node);
6800 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6802 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6803 struct hclge_vport *vport;
6806 mutex_lock(&hdev->vport_cfg_mutex);
6807 for (i = 0; i < hdev->num_alloc_vport; i++) {
6808 vport = &hdev->vport[i];
6809 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6810 list_del(&mac->node);
6814 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6815 list_del(&mac->node);
6819 mutex_unlock(&hdev->vport_cfg_mutex);
6822 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6823 u16 cmdq_resp, u8 resp_code)
6825 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6826 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6827 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6828 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6833 dev_err(&hdev->pdev->dev,
6834 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6839 switch (resp_code) {
6840 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6841 case HCLGE_ETHERTYPE_ALREADY_ADD:
6844 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6845 dev_err(&hdev->pdev->dev,
6846 "add mac ethertype failed for manager table overflow.\n");
6847 return_status = -EIO;
6849 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6850 dev_err(&hdev->pdev->dev,
6851 "add mac ethertype failed for key conflict.\n");
6852 return_status = -EIO;
6855 dev_err(&hdev->pdev->dev,
6856 "add mac ethertype failed for undefined, code=%d.\n",
6858 return_status = -EIO;
6861 return return_status;
6864 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6865 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6867 struct hclge_desc desc;
6872 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6873 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6875 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6877 dev_err(&hdev->pdev->dev,
6878 "add mac ethertype failed for cmd_send, ret =%d.\n",
6883 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6884 retval = le16_to_cpu(desc.retval);
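/* Note: the firmware reply carries two status fields: the generic command
 * retval in the descriptor header and an 8-bit ethertype-specific code in
 * bits 15:8 of the first data word; both are folded into one errno below.
 */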
6886 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6889 static int init_mgr_tbl(struct hclge_dev *hdev)
6894 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6895 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6897 dev_err(&hdev->pdev->dev,
6898 "add mac ethertype failed, ret =%d.\n",
6907 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6909 struct hclge_vport *vport = hclge_get_vport(handle);
6910 struct hclge_dev *hdev = vport->back;
6912 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6915 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6918 const unsigned char *new_addr = (const unsigned char *)p;
6919 struct hclge_vport *vport = hclge_get_vport(handle);
6920 struct hclge_dev *hdev = vport->back;
6923 /* mac addr check */
6924 if (is_zero_ether_addr(new_addr) ||
6925 is_broadcast_ether_addr(new_addr) ||
6926 is_multicast_ether_addr(new_addr)) {
6927 dev_err(&hdev->pdev->dev,
6928 "Change uc mac err! invalid mac:%pM.\n",
6933 if ((!is_first || is_kdump_kernel()) &&
6934 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6935 dev_warn(&hdev->pdev->dev,
6936 "remove old uc mac address fail.\n");
6938 ret = hclge_add_uc_addr(handle, new_addr);
6940 dev_err(&hdev->pdev->dev,
6941 "add uc mac address fail, ret =%d.\n",
6944 if (!is_first &&
6945 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6946 dev_err(&hdev->pdev->dev,
6947 "restore uc mac address fail.\n");
6952 ret = hclge_pause_addr_cfg(hdev, new_addr);
6954 dev_err(&hdev->pdev->dev,
6955 "configure mac pause address fail, ret =%d.\n",
6960 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6965 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6968 struct hclge_vport *vport = hclge_get_vport(handle);
6969 struct hclge_dev *hdev = vport->back;
6971 if (!hdev->hw.mac.phydev)
6974 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6977 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6978 u8 fe_type, bool filter_en, u8 vf_id)
6980 struct hclge_vlan_filter_ctrl_cmd *req;
6981 struct hclge_desc desc;
6984 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6986 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6987 req->vlan_type = vlan_type;
6988 req->vlan_fe = filter_en ? fe_type : 0;
6991 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6993 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6999 #define HCLGE_FILTER_TYPE_VF 0
7000 #define HCLGE_FILTER_TYPE_PORT 1
7001 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7002 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7003 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7004 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7005 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7006 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7007 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7008 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7009 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7011 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7013 struct hclge_vport *vport = hclge_get_vport(handle);
7014 struct hclge_dev *hdev = vport->back;
7016 if (hdev->pdev->revision >= 0x21) {
7017 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7018 HCLGE_FILTER_FE_EGRESS, enable, 0);
7019 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7020 HCLGE_FILTER_FE_INGRESS, enable, 0);
7022 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7023 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7027 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7029 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7032 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7033 bool is_kill, u16 vlan, u8 qos,
7036 #define HCLGE_MAX_VF_BYTES 16
7037 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7038 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7039 struct hclge_desc desc[2];
7044 hclge_cmd_setup_basic_desc(&desc[0],
7045 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7046 hclge_cmd_setup_basic_desc(&desc[1],
7047 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7049 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7051 vf_byte_off = vfid / 8;
7052 vf_byte_val = 1 << (vfid % 8);
7054 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7055 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7057 req0->vlan_id = cpu_to_le16(vlan);
7058 req0->vlan_cfg = is_kill;
7060 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7061 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7063 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
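/* Note: the VF bitmap spans two descriptors of HCLGE_MAX_VF_BYTES (16)
 * bytes, one bit per function. E.g. vfid 10 -> byte 1, bit 2 of req0,
 * while vfid 130 -> byte 16, which overflows into byte 0 of req1.
 */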
7065 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7067 dev_err(&hdev->pdev->dev,
7068 "Send vf vlan command fail, ret =%d.\n",
7074 #define HCLGE_VF_VLAN_NO_ENTRY 2
7075 if (!req0->resp_code || req0->resp_code == 1)
7078 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7079 dev_warn(&hdev->pdev->dev,
7080 "vf vlan table is full, vf vlan filter is disabled\n");
7084 dev_err(&hdev->pdev->dev,
7085 "Add vf vlan filter fail, ret =%d.\n",
7088 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7089 if (!req0->resp_code)
7092 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7093 dev_warn(&hdev->pdev->dev,
7094 "vlan %d filter is not in vf vlan table\n",
7099 dev_err(&hdev->pdev->dev,
7100 "Kill vf vlan filter fail, ret =%d.\n",
7107 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7108 u16 vlan_id, bool is_kill)
7110 struct hclge_vlan_filter_pf_cfg_cmd *req;
7111 struct hclge_desc desc;
7112 u8 vlan_offset_byte_val;
7113 u8 vlan_offset_byte;
7117 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7119 vlan_offset_160 = vlan_id / 160;
7120 vlan_offset_byte = (vlan_id % 160) / 8;
7121 vlan_offset_byte_val = 1 << (vlan_id % 8);
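/* Note: the PF VLAN table is programmed in windows of 160 VLAN ids. E.g.
 * vlan_id 500: vlan_offset_160 = 3 selects ids 480..639, and byte 2 /
 * bit 4 of the bitmap address id 500 inside that window.
 */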
7123 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7124 req->vlan_offset = vlan_offset_160;
7125 req->vlan_cfg = is_kill;
7126 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7128 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7130 dev_err(&hdev->pdev->dev,
7131 "port vlan command, send fail, ret =%d.\n", ret);
7135 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7136 u16 vport_id, u16 vlan_id, u8 qos,
7139 u16 vport_idx, vport_num = 0;
7142 if (is_kill && !vlan_id)
7145 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7148 dev_err(&hdev->pdev->dev,
7149 "Set %d vport vlan filter config fail, ret =%d.\n",
7154 /* vlan 0 may be added twice when 8021q module is enabled */
7155 if (!is_kill && !vlan_id &&
7156 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7159 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7160 dev_err(&hdev->pdev->dev,
7161 "Add port vlan failed, vport %d is already in vlan %d\n",
7167 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7168 dev_err(&hdev->pdev->dev,
7169 "Delete port vlan failed, vport %d is not in vlan %d\n",
7174 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
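/* Note: the shared port-level filter is only touched at the edges: when
 * the first vport joins the VLAN (vport_num == 1 after an add) or the
 * last one leaves (vport_num == 0 after a kill); intermediate changes
 * only flip bits in hdev->vlan_table.
 */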
7177 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7178 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7179 is_kill);
7184 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7186 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7187 struct hclge_vport_vtag_tx_cfg_cmd *req;
7188 struct hclge_dev *hdev = vport->back;
7189 struct hclge_desc desc;
7192 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7194 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7195 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7196 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7197 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7198 vcfg->accept_tag1 ? 1 : 0);
7199 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7200 vcfg->accept_untag1 ? 1 : 0);
7201 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7202 vcfg->accept_tag2 ? 1 : 0);
7203 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7204 vcfg->accept_untag2 ? 1 : 0);
7205 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7206 vcfg->insert_tag1_en ? 1 : 0);
7207 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7208 vcfg->insert_tag2_en ? 1 : 0);
7209 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7211 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7212 req->vf_bitmap[req->vf_offset] =
7213 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
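/* Note: each vtag-config command covers HCLGE_VF_NUM_PER_CMD functions;
 * vf_offset selects the window and the computed bit (HCLGE_VF_NUM_PER_BYTE
 * bits per bitmap byte) selects this vport within it.
 */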
7215 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7217 dev_err(&hdev->pdev->dev,
7218 "Send port txvlan cfg command fail, ret =%d\n",
7224 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7226 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7227 struct hclge_vport_vtag_rx_cfg_cmd *req;
7228 struct hclge_dev *hdev = vport->back;
7229 struct hclge_desc desc;
7232 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7234 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7235 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7236 vcfg->strip_tag1_en ? 1 : 0);
7237 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7238 vcfg->strip_tag2_en ? 1 : 0);
7239 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7240 vcfg->vlan1_vlan_prionly ? 1 : 0);
7241 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7242 vcfg->vlan2_vlan_prionly ? 1 : 0);
7244 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7245 req->vf_bitmap[req->vf_offset] =
7246 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7248 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7250 dev_err(&hdev->pdev->dev,
7251 "Send port rxvlan cfg command fail, ret =%d\n",
7257 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7258 u16 port_base_vlan_state,
7263 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7264 vport->txvlan_cfg.accept_tag1 = true;
7265 vport->txvlan_cfg.insert_tag1_en = false;
7266 vport->txvlan_cfg.default_tag1 = 0;
7268 vport->txvlan_cfg.accept_tag1 = false;
7269 vport->txvlan_cfg.insert_tag1_en = true;
7270 vport->txvlan_cfg.default_tag1 = vlan_tag;
7273 vport->txvlan_cfg.accept_untag1 = true;
7275 /* accept_tag2 and accept_untag2 are not supported on
7276 * pdev revision(0x20); newer revisions support them, and
7277 * these two fields cannot be configured by the user.
7278 */
7279 vport->txvlan_cfg.accept_tag2 = true;
7280 vport->txvlan_cfg.accept_untag2 = true;
7281 vport->txvlan_cfg.insert_tag2_en = false;
7282 vport->txvlan_cfg.default_tag2 = 0;
7284 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7285 vport->rxvlan_cfg.strip_tag1_en = false;
7286 vport->rxvlan_cfg.strip_tag2_en =
7287 vport->rxvlan_cfg.rx_vlan_offload_en;
7289 vport->rxvlan_cfg.strip_tag1_en =
7290 vport->rxvlan_cfg.rx_vlan_offload_en;
7291 vport->rxvlan_cfg.strip_tag2_en = true;
7293 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7294 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
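/* Note: rough rx-strip policy: without a port based VLAN only the user
 * tag is present and its stripping tracks rx_vlan_offload_en; with a
 * port based VLAN active the port tag is unconditionally stripped
 * (strip_tag2_en above), so the stack never sees it.
 */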
7296 ret = hclge_set_vlan_tx_offload_cfg(vport);
7300 return hclge_set_vlan_rx_offload_cfg(vport);
7303 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7305 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7306 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7307 struct hclge_desc desc;
7310 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7311 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7312 rx_req->ot_fst_vlan_type =
7313 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7314 rx_req->ot_sec_vlan_type =
7315 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7316 rx_req->in_fst_vlan_type =
7317 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7318 rx_req->in_sec_vlan_type =
7319 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7321 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7323 dev_err(&hdev->pdev->dev,
7324 "Send rxvlan protocol type command fail, ret =%d\n",
7329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7331 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7332 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7333 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7335 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7337 dev_err(&hdev->pdev->dev,
7338 "Send txvlan protocol type command fail, ret =%d\n",
7344 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7346 #define HCLGE_DEF_VLAN_TYPE 0x8100
7348 struct hnae3_handle *handle = &hdev->vport[0].nic;
7349 struct hclge_vport *vport;
7353 if (hdev->pdev->revision >= 0x21) {
7354 /* for revision 0x21, vf vlan filter is per function */
7355 for (i = 0; i < hdev->num_alloc_vport; i++) {
7356 vport = &hdev->vport[i];
7357 ret = hclge_set_vlan_filter_ctrl(hdev,
7358 HCLGE_FILTER_TYPE_VF,
7359 HCLGE_FILTER_FE_EGRESS,
7366 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7367 HCLGE_FILTER_FE_INGRESS, true,
7372 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7373 HCLGE_FILTER_FE_EGRESS_V1_B,
7379 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7381 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7382 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7383 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7384 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7385 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7386 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7388 ret = hclge_set_vlan_protocol_type(hdev);
7392 for (i = 0; i < hdev->num_alloc_vport; i++) {
7395 vport = &hdev->vport[i];
7396 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7398 ret = hclge_vlan_offload_cfg(vport,
7399 vport->port_base_vlan_cfg.state,
7405 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7408 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7411 struct hclge_vport_vlan_cfg *vlan;
7413 /* vlan 0 is reserved */
7417 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7421 vlan->hd_tbl_status = writen_to_tbl;
7422 vlan->vlan_id = vlan_id;
7424 list_add_tail(&vlan->node, &vport->vlan_list);
7427 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7429 struct hclge_vport_vlan_cfg *vlan, *tmp;
7430 struct hclge_dev *hdev = vport->back;
7433 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7434 if (!vlan->hd_tbl_status) {
7435 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7437 vlan->vlan_id, 0, false);
7439 dev_err(&hdev->pdev->dev,
7440 "restore vport vlan list failed, ret=%d\n",
7445 vlan->hd_tbl_status = true;
7451 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7454 struct hclge_vport_vlan_cfg *vlan, *tmp;
7455 struct hclge_dev *hdev = vport->back;
7457 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7458 if (vlan->vlan_id == vlan_id) {
7459 if (is_write_tbl && vlan->hd_tbl_status)
7460 hclge_set_vlan_filter_hw(hdev,
7466 list_del(&vlan->node);
7473 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7475 struct hclge_vport_vlan_cfg *vlan, *tmp;
7476 struct hclge_dev *hdev = vport->back;
7478 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7479 if (vlan->hd_tbl_status)
7480 hclge_set_vlan_filter_hw(hdev,
7486 vlan->hd_tbl_status = false;
7488 list_del(&vlan->node);
7494 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7496 struct hclge_vport_vlan_cfg *vlan, *tmp;
7497 struct hclge_vport *vport;
7500 mutex_lock(&hdev->vport_cfg_mutex);
7501 for (i = 0; i < hdev->num_alloc_vport; i++) {
7502 vport = &hdev->vport[i];
7503 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7504 list_del(&vlan->node);
7508 mutex_unlock(&hdev->vport_cfg_mutex);
7511 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7513 struct hclge_vport *vport = hclge_get_vport(handle);
7515 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7516 vport->rxvlan_cfg.strip_tag1_en = false;
7517 vport->rxvlan_cfg.strip_tag2_en = enable;
7519 vport->rxvlan_cfg.strip_tag1_en = enable;
7520 vport->rxvlan_cfg.strip_tag2_en = true;
7522 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7523 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7524 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7526 return hclge_set_vlan_rx_offload_cfg(vport);
7529 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7530 u16 port_base_vlan_state,
7531 struct hclge_vlan_info *new_info,
7532 struct hclge_vlan_info *old_info)
7534 struct hclge_dev *hdev = vport->back;
7537 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7538 hclge_rm_vport_all_vlan_table(vport, false);
7539 return hclge_set_vlan_filter_hw(hdev,
7540 htons(new_info->vlan_proto),
7543 new_info->qos, false);
7546 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7547 vport->vport_id, old_info->vlan_tag,
7548 old_info->qos, true);
7552 return hclge_add_vport_all_vlan_table(vport);
7555 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7556 struct hclge_vlan_info *vlan_info)
7558 struct hnae3_handle *nic = &vport->nic;
7559 struct hclge_vlan_info *old_vlan_info;
7560 struct hclge_dev *hdev = vport->back;
7563 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7565 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7569 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7570 /* add new VLAN tag */
7571 ret = hclge_set_vlan_filter_hw(hdev,
7572 htons(vlan_info->vlan_proto),
7574 vlan_info->vlan_tag,
7575 vlan_info->qos, false);
7579 /* remove old VLAN tag */
7580 ret = hclge_set_vlan_filter_hw(hdev,
7581 htons(old_vlan_info->vlan_proto),
7583 old_vlan_info->vlan_tag,
7584 old_vlan_info->qos, true);
7591 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7596 /* update state only when disabling/enabling port based VLAN */
7597 vport->port_base_vlan_cfg.state = state;
7598 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7599 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7601 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7604 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7605 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7606 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7611 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7612 enum hnae3_port_base_vlan_state state,
7615 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7617 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7619 return HNAE3_PORT_BASE_VLAN_ENABLE;
7622 return HNAE3_PORT_BASE_VLAN_DISABLE;
7623 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7624 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7626 return HNAE3_PORT_BASE_VLAN_MODIFY;
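/* Decision summary: disabled + vlan 0 -> NOCHANGE, disabled + vlan ->
 * ENABLE; enabled + vlan 0 -> DISABLE, enabled + same vlan -> NOCHANGE,
 * enabled + different vlan -> MODIFY.
 */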
7630 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7631 u16 vlan, u8 qos, __be16 proto)
7633 struct hclge_vport *vport = hclge_get_vport(handle);
7634 struct hclge_dev *hdev = vport->back;
7635 struct hclge_vlan_info vlan_info;
7639 if (hdev->pdev->revision == 0x20)
7642 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7643 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7645 if (proto != htons(ETH_P_8021Q))
7646 return -EPROTONOSUPPORT;
7648 vport = &hdev->vport[vfid];
7649 state = hclge_get_port_base_vlan_state(vport,
7650 vport->port_base_vlan_cfg.state,
7652 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7655 vlan_info.vlan_tag = vlan;
7656 vlan_info.qos = qos;
7657 vlan_info.vlan_proto = ntohs(proto);
7659 /* update port based VLAN for PF */
7661 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7662 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7663 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7668 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7669 return hclge_update_port_base_vlan_cfg(vport, state,
7672 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7680 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7681 u16 vlan_id, bool is_kill)
7683 struct hclge_vport *vport = hclge_get_vport(handle);
7684 struct hclge_dev *hdev = vport->back;
7685 bool writen_to_tbl = false;
7688 /* When port based VLAN is enabled, we use the port based VLAN as the
7689 * VLAN filter entry. In this case, we don't update the VLAN filter table
7690 * when the user adds a new VLAN or removes an existing VLAN, just update
7691 * the vport VLAN list. The VLAN id in the VLAN list won't be written to
7692 * the VLAN filter table until port based VLAN is disabled.
7693 */
7694 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7695 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7696 vlan_id, 0, is_kill);
7697 writen_to_tbl = true;
7704 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7706 hclge_add_vport_vlan_table(vport, vlan_id,
7712 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7714 struct hclge_config_max_frm_size_cmd *req;
7715 struct hclge_desc desc;
7717 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7719 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7720 req->max_frm_size = cpu_to_le16(new_mps);
7721 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7723 return hclge_cmd_send(&hdev->hw, &desc, 1);
7726 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7728 struct hclge_vport *vport = hclge_get_vport(handle);
7730 return hclge_set_vport_mtu(vport, new_mtu);
7733 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7735 struct hclge_dev *hdev = vport->back;
7736 int i, max_frm_size, ret = 0;
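/* Note: the MTU excludes L2 overhead, so the hardware frame size adds the
 * Ethernet header, FCS and room for two VLAN tags: e.g. an MTU of 1500
 * maps to 1500 + 14 + 4 + 2 * 4 = 1526 bytes.
 */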
7738 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7739 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7740 max_frm_size > HCLGE_MAC_MAX_FRAME)
7743 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7744 mutex_lock(&hdev->vport_lock);
7745 /* VF's mps must fit within hdev->mps */
7746 if (vport->vport_id && max_frm_size > hdev->mps) {
7747 mutex_unlock(&hdev->vport_lock);
7749 } else if (vport->vport_id) {
7750 vport->mps = max_frm_size;
7751 mutex_unlock(&hdev->vport_lock);
7755 /* PF's mps must be greater than VF's mps */
7756 for (i = 1; i < hdev->num_alloc_vport; i++)
7757 if (max_frm_size < hdev->vport[i].mps) {
7758 mutex_unlock(&hdev->vport_lock);
7762 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7764 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7766 dev_err(&hdev->pdev->dev,
7767 "Change mtu fail, ret =%d\n", ret);
7771 hdev->mps = max_frm_size;
7772 vport->mps = max_frm_size;
7774 ret = hclge_buffer_alloc(hdev);
7776 dev_err(&hdev->pdev->dev,
7777 "Allocate buffer fail, ret =%d\n", ret);
7780 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7781 mutex_unlock(&hdev->vport_lock);
7785 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7788 struct hclge_reset_tqp_queue_cmd *req;
7789 struct hclge_desc desc;
7792 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7794 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7795 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7796 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7798 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7800 dev_err(&hdev->pdev->dev,
7801 "Send tqp reset cmd error, status =%d\n", ret);
7808 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7810 struct hclge_reset_tqp_queue_cmd *req;
7811 struct hclge_desc desc;
7814 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7816 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7817 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7819 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7821 dev_err(&hdev->pdev->dev,
7822 "Get reset status error, status =%d\n", ret);
7826 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7829 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7831 struct hnae3_queue *queue;
7832 struct hclge_tqp *tqp;
7834 queue = handle->kinfo.tqp[queue_id];
7835 tqp = container_of(queue, struct hclge_tqp, q);
7840 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7842 struct hclge_vport *vport = hclge_get_vport(handle);
7843 struct hclge_dev *hdev = vport->back;
7844 int reset_try_times = 0;
7849 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7851 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7853 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7857 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7859 dev_err(&hdev->pdev->dev,
7860 "Send reset tqp cmd fail, ret = %d\n", ret);
7864 reset_try_times = 0;
7865 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7866 /* Wait for tqp hw reset */
7868 reset_status = hclge_get_reset_status(hdev, queue_gid);
7873 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7874 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7878 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7880 dev_err(&hdev->pdev->dev,
7881 "Deassert the soft reset fail, ret = %d\n", ret);
7886 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7888 struct hclge_dev *hdev = vport->back;
7889 int reset_try_times = 0;
7894 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7896 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7898 dev_warn(&hdev->pdev->dev,
7899 "Send reset tqp cmd fail, ret = %d\n", ret);
7903 reset_try_times = 0;
7904 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7905 /* Wait for tqp hw reset */
7907 reset_status = hclge_get_reset_status(hdev, queue_gid);
7912 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7913 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7917 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7919 dev_warn(&hdev->pdev->dev,
7920 "Deassert the soft reset fail, ret = %d\n", ret);
7923 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7925 struct hclge_vport *vport = hclge_get_vport(handle);
7926 struct hclge_dev *hdev = vport->back;
7928 return hdev->fw_version;
7931 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7933 struct phy_device *phydev = hdev->hw.mac.phydev;
7938 phy_set_asym_pause(phydev, rx_en, tx_en);
7941 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7946 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7947 else if (rx_en && !tx_en)
7948 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7949 else if (!rx_en && tx_en)
7950 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7952 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7954 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7957 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7959 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7964 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7969 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7971 struct phy_device *phydev = hdev->hw.mac.phydev;
7972 u16 remote_advertising = 0;
7973 u16 local_advertising = 0;
7974 u32 rx_pause, tx_pause;
7977 if (!phydev->link || !phydev->autoneg)
7980 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7983 remote_advertising = LPA_PAUSE_CAP;
7985 if (phydev->asym_pause)
7986 remote_advertising |= LPA_PAUSE_ASYM;
7988 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7989 remote_advertising);
7990 tx_pause = flowctl & FLOW_CTRL_TX;
7991 rx_pause = flowctl & FLOW_CTRL_RX;
7993 if (phydev->duplex == HCLGE_MAC_HALF) {
7998 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8001 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8002 u32 *rx_en, u32 *tx_en)
8004 struct hclge_vport *vport = hclge_get_vport(handle);
8005 struct hclge_dev *hdev = vport->back;
8007 *auto_neg = hclge_get_autoneg(handle);
8009 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8015 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8018 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8021 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8030 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8031 u32 rx_en, u32 tx_en)
8033 struct hclge_vport *vport = hclge_get_vport(handle);
8034 struct hclge_dev *hdev = vport->back;
8035 struct phy_device *phydev = hdev->hw.mac.phydev;
8038 fc_autoneg = hclge_get_autoneg(handle);
8039 if (auto_neg != fc_autoneg) {
8040 dev_info(&hdev->pdev->dev,
8041 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8045 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8046 dev_info(&hdev->pdev->dev,
8047 "Priority flow control enabled. Cannot set link flow control.\n");
8051 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8054 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8057 return phy_start_aneg(phydev);
8059 if (hdev->pdev->revision == 0x20)
8062 return hclge_restart_autoneg(handle);
8065 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8066 u8 *auto_neg, u32 *speed, u8 *duplex)
8068 struct hclge_vport *vport = hclge_get_vport(handle);
8069 struct hclge_dev *hdev = vport->back;
8072 *speed = hdev->hw.mac.speed;
8074 *duplex = hdev->hw.mac.duplex;
8076 *auto_neg = hdev->hw.mac.autoneg;
8079 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8082 struct hclge_vport *vport = hclge_get_vport(handle);
8083 struct hclge_dev *hdev = vport->back;
8086 *media_type = hdev->hw.mac.media_type;
8089 *module_type = hdev->hw.mac.module_type;
8092 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8093 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8095 struct hclge_vport *vport = hclge_get_vport(handle);
8096 struct hclge_dev *hdev = vport->back;
8097 struct phy_device *phydev = hdev->hw.mac.phydev;
8098 int mdix_ctrl, mdix, retval, is_resolved;
8101 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8102 *tp_mdix = ETH_TP_MDI_INVALID;
8106 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8108 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8109 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8110 HCLGE_PHY_MDIX_CTRL_S);
8112 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8113 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8114 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8116 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8118 switch (mdix_ctrl) {
8120 *tp_mdix_ctrl = ETH_TP_MDI;
8123 *tp_mdix_ctrl = ETH_TP_MDI_X;
8126 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8129 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8134 *tp_mdix = ETH_TP_MDI_INVALID;
8136 *tp_mdix = ETH_TP_MDI_X;
8138 *tp_mdix = ETH_TP_MDI;
8141 static void hclge_info_show(struct hclge_dev *hdev)
8143 struct device *dev = &hdev->pdev->dev;
8145 dev_info(dev, "PF info begin:\n");
8147 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8148 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8149 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8150 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8151 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8152 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8153 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8154 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8155 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8156 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8157 dev_info(dev, "This is %s PF\n",
8158 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8159 dev_info(dev, "DCB %s\n",
8160 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8161 dev_info(dev, "MQPRIO %s\n",
8162 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8164 dev_info(dev, "PF info end.\n");
8167 static int hclge_init_client_instance(struct hnae3_client *client,
8168 struct hnae3_ae_dev *ae_dev)
8170 struct hclge_dev *hdev = ae_dev->priv;
8171 struct hclge_vport *vport;
8174 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8175 vport = &hdev->vport[i];
8177 switch (client->type) {
8178 case HNAE3_CLIENT_KNIC:
8180 hdev->nic_client = client;
8181 vport->nic.client = client;
8182 ret = client->ops->init_instance(&vport->nic);
8186 hnae3_set_client_init_flag(client, ae_dev, 1);
8188 if (netif_msg_drv(&hdev->vport->nic))
8189 hclge_info_show(hdev);
8191 if (hdev->roce_client &&
8192 hnae3_dev_roce_supported(hdev)) {
8193 struct hnae3_client *rc = hdev->roce_client;
8195 ret = hclge_init_roce_base_info(vport);
8199 ret = rc->ops->init_instance(&vport->roce);
8203 hnae3_set_client_init_flag(hdev->roce_client,
8208 case HNAE3_CLIENT_UNIC:
8209 hdev->nic_client = client;
8210 vport->nic.client = client;
8212 ret = client->ops->init_instance(&vport->nic);
8216 hnae3_set_client_init_flag(client, ae_dev, 1);
8219 case HNAE3_CLIENT_ROCE:
8220 if (hnae3_dev_roce_supported(hdev)) {
8221 hdev->roce_client = client;
8222 vport->roce.client = client;
8225 if (hdev->roce_client && hdev->nic_client) {
8226 ret = hclge_init_roce_base_info(vport);
8230 ret = client->ops->init_instance(&vport->roce);
8234 hnae3_set_client_init_flag(client, ae_dev, 1);
8246 hdev->nic_client = NULL;
8247 vport->nic.client = NULL;
8250 hdev->roce_client = NULL;
8251 vport->roce.client = NULL;
8255 static void hclge_uninit_client_instance(struct hnae3_client *client,
8256 struct hnae3_ae_dev *ae_dev)
8258 struct hclge_dev *hdev = ae_dev->priv;
8259 struct hclge_vport *vport;
8262 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8263 vport = &hdev->vport[i];
8264 if (hdev->roce_client) {
8265 hdev->roce_client->ops->uninit_instance(&vport->roce,
8267 hdev->roce_client = NULL;
8268 vport->roce.client = NULL;
8270 if (client->type == HNAE3_CLIENT_ROCE)
8272 if (hdev->nic_client && client->ops->uninit_instance) {
8273 client->ops->uninit_instance(&vport->nic, 0);
8274 hdev->nic_client = NULL;
8275 vport->nic.client = NULL;
8280 static int hclge_pci_init(struct hclge_dev *hdev)
8282 struct pci_dev *pdev = hdev->pdev;
8283 struct hclge_hw *hw;
8286 ret = pci_enable_device(pdev);
8288 dev_err(&pdev->dev, "failed to enable PCI device\n");
8292 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8294 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8297 "can't set consistent PCI DMA");
8298 goto err_disable_device;
8300 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8303 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8305 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8306 goto err_disable_device;
8309 pci_set_master(pdev);
8311 hw->io_base = pcim_iomap(pdev, 2, 0);
8313 dev_err(&pdev->dev, "Can't map configuration register space\n");
8315 goto err_clr_master;
8318 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8322 pci_clear_master(pdev);
8323 pci_release_regions(pdev);
8325 pci_disable_device(pdev);
8330 static void hclge_pci_uninit(struct hclge_dev *hdev)
8332 struct pci_dev *pdev = hdev->pdev;
8334 pcim_iounmap(pdev, hdev->hw.io_base);
8335 pci_free_irq_vectors(pdev);
8336 pci_clear_master(pdev);
8337 pci_release_mem_regions(pdev);
8338 pci_disable_device(pdev);
8341 static void hclge_state_init(struct hclge_dev *hdev)
8343 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8344 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8345 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8346 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8347 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8348 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8351 static void hclge_state_uninit(struct hclge_dev *hdev)
8353 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8355 if (hdev->service_timer.function)
8356 del_timer_sync(&hdev->service_timer);
8357 if (hdev->reset_timer.function)
8358 del_timer_sync(&hdev->reset_timer);
8359 if (hdev->service_task.func)
8360 cancel_work_sync(&hdev->service_task);
8361 if (hdev->rst_service_task.func)
8362 cancel_work_sync(&hdev->rst_service_task);
8363 if (hdev->mbx_service_task.func)
8364 cancel_work_sync(&hdev->mbx_service_task);
8367 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8369 #define HCLGE_FLR_WAIT_MS 100
8370 #define HCLGE_FLR_WAIT_CNT 50
8371 struct hclge_dev *hdev = ae_dev->priv;
8374 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8375 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8376 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8377 hclge_reset_event(hdev->pdev, NULL);
8379 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8380 cnt++ < HCLGE_FLR_WAIT_CNT)
8381 msleep(HCLGE_FLR_WAIT_MS);
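/* Note: the loop above polls for the stack to report HNAE3_FLR_DOWN;
 * 50 iterations of 100 ms give it roughly 5 seconds to quiesce before
 * the timeout below is logged.
 */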
8383 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8384 dev_err(&hdev->pdev->dev,
8385 "flr wait down timeout: %d\n", cnt);
8388 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8390 struct hclge_dev *hdev = ae_dev->priv;
8392 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8395 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8397 struct pci_dev *pdev = ae_dev->pdev;
8398 struct hclge_dev *hdev;
8401 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8408 hdev->ae_dev = ae_dev;
8409 hdev->reset_type = HNAE3_NONE_RESET;
8410 hdev->reset_level = HNAE3_FUNC_RESET;
8411 ae_dev->priv = hdev;
8412 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8414 mutex_init(&hdev->vport_lock);
8415 mutex_init(&hdev->vport_cfg_mutex);
8416 spin_lock_init(&hdev->fd_rule_lock);
8418 ret = hclge_pci_init(hdev);
8420 dev_err(&pdev->dev, "PCI init failed\n");
8424 /* Initialize the firmware command queue */
8425 ret = hclge_cmd_queue_init(hdev);
8427 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8428 goto err_pci_uninit;
8431 /* Initialize the firmware command */
8432 ret = hclge_cmd_init(hdev);
8434 goto err_cmd_uninit;
8436 ret = hclge_get_cap(hdev);
8438 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8440 goto err_cmd_uninit;
8443 ret = hclge_configure(hdev);
8445 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8446 goto err_cmd_uninit;
8449 ret = hclge_init_msi(hdev);
8451 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8452 goto err_cmd_uninit;
8455 ret = hclge_misc_irq_init(hdev);
8458 "Misc IRQ(vector0) init error, ret = %d.\n",
8460 goto err_msi_uninit;
8463 ret = hclge_alloc_tqps(hdev);
8465 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8466 goto err_msi_irq_uninit;
8469 ret = hclge_alloc_vport(hdev);
8471 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8472 goto err_msi_irq_uninit;
8475 ret = hclge_map_tqp(hdev);
8477 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8478 goto err_msi_irq_uninit;
8481 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8482 ret = hclge_mac_mdio_config(hdev);
8484 dev_err(&hdev->pdev->dev,
8485 "mdio config fail ret=%d\n", ret);
8486 goto err_msi_irq_uninit;
8490 ret = hclge_init_umv_space(hdev);
8492 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8493 goto err_mdiobus_unreg;
8496 ret = hclge_mac_init(hdev);
8498 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8499 goto err_mdiobus_unreg;
8502 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8504 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8505 goto err_mdiobus_unreg;
8508 ret = hclge_config_gro(hdev, true);
8510 goto err_mdiobus_unreg;
8512 ret = hclge_init_vlan_config(hdev);
8514 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8515 goto err_mdiobus_unreg;
8518 ret = hclge_tm_schd_init(hdev);
8520 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8521 goto err_mdiobus_unreg;
8524 hclge_rss_init_cfg(hdev);
8525 ret = hclge_rss_init_hw(hdev);
8527 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8528 goto err_mdiobus_unreg;
8531 ret = init_mgr_tbl(hdev);
8533 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8534 goto err_mdiobus_unreg;
8537 ret = hclge_init_fd_config(hdev);
8540 "fd table init fail, ret=%d\n", ret);
8541 goto err_mdiobus_unreg;
8544 ret = hclge_hw_error_set_state(hdev, true);
8547 "fail(%d) to enable hw error interrupts\n", ret);
8548 goto err_mdiobus_unreg;
8551 INIT_KFIFO(hdev->mac_tnl_log);
8553 hclge_dcb_ops_set(hdev);
8555 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8556 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8557 INIT_WORK(&hdev->service_task, hclge_service_task);
8558 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8559 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8561 hclge_clear_all_event_cause(hdev);
8563 /* Enable MISC vector(vector0) */
8564 hclge_enable_vector(&hdev->misc_vector, true);
8566 hclge_state_init(hdev);
8567 hdev->last_reset_time = jiffies;
8569 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8573 if (hdev->hw.mac.phydev)
8574 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8576 hclge_misc_irq_uninit(hdev);
8578 pci_free_irq_vectors(pdev);
8580 hclge_cmd_uninit(hdev);
8582 pcim_iounmap(pdev, hdev->hw.io_base);
8583 pci_clear_master(pdev);
8584 pci_release_regions(pdev);
8585 pci_disable_device(pdev);
8590 static void hclge_stats_clear(struct hclge_dev *hdev)
8592 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8595 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8597 struct hclge_vport *vport = hdev->vport;
8600 for (i = 0; i < hdev->num_alloc_vport; i++) {
8601 hclge_vport_stop(vport);
8606 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8608 struct hclge_dev *hdev = ae_dev->priv;
8609 struct pci_dev *pdev = ae_dev->pdev;
8612 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8614 hclge_stats_clear(hdev);
8615 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8617 ret = hclge_cmd_init(hdev);
8619 dev_err(&pdev->dev, "Cmd queue init failed\n");
8623 ret = hclge_map_tqp(hdev);
8625 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8629 hclge_reset_umv_space(hdev);
8631 ret = hclge_mac_init(hdev);
8633 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8637 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8639 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8643 ret = hclge_config_gro(hdev, true);
8647 ret = hclge_init_vlan_config(hdev);
8649 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8653 ret = hclge_tm_init_hw(hdev, true);
8655 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8659 ret = hclge_rss_init_hw(hdev);
8661 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8665 ret = hclge_init_fd_config(hdev);
8668 "fd table init fail, ret=%d\n", ret);
8672 /* Re-enable the hw error interrupts because
8673 * the interrupts get disabled on core/global reset.
8674 */
8675 ret = hclge_hw_error_set_state(hdev, true);
8678 "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8682 hclge_reset_vport_state(hdev);
8684 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8690 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8692 struct hclge_dev *hdev = ae_dev->priv;
8693 struct hclge_mac *mac = &hdev->hw.mac;
8695 hclge_state_uninit(hdev);
8698 mdiobus_unregister(mac->mdio_bus);
8700 hclge_uninit_umv_space(hdev);
8702 /* Disable MISC vector(vector0) */
8703 hclge_enable_vector(&hdev->misc_vector, false);
8704 synchronize_irq(hdev->misc_vector.vector_irq);
8706 hclge_config_mac_tnl_int(hdev, false);
8707 hclge_hw_error_set_state(hdev, false);
8708 hclge_cmd_uninit(hdev);
8709 hclge_misc_irq_uninit(hdev);
8710 hclge_pci_uninit(hdev);
8711 mutex_destroy(&hdev->vport_lock);
8712 hclge_uninit_vport_mac_table(hdev);
8713 hclge_uninit_vport_vlan_table(hdev);
8714 mutex_destroy(&hdev->vport_cfg_mutex);
8715 ae_dev->priv = NULL;
8718 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8720 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8721 struct hclge_vport *vport = hclge_get_vport(handle);
8722 struct hclge_dev *hdev = vport->back;
8724 return min_t(u32, hdev->rss_size_max,
8725 vport->alloc_tqps / kinfo->num_tc);
8728 static void hclge_get_channels(struct hnae3_handle *handle,
8729 struct ethtool_channels *ch)
8731 ch->max_combined = hclge_get_max_channels(handle);
8732 ch->other_count = 1;
8734 ch->combined_count = handle->kinfo.rss_size;
8737 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8738 u16 *alloc_tqps, u16 *max_rss_size)
8740 struct hclge_vport *vport = hclge_get_vport(handle);
8741 struct hclge_dev *hdev = vport->back;
8743 *alloc_tqps = vport->alloc_tqps;
8744 *max_rss_size = hdev->rss_size_max;
8747 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8748 bool rxfh_configured)
8750 struct hclge_vport *vport = hclge_get_vport(handle);
8751 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8752 struct hclge_dev *hdev = vport->back;
8753 int cur_rss_size = kinfo->rss_size;
8754 int cur_tqps = kinfo->num_tqps;
8755 u16 tc_offset[HCLGE_MAX_TC_NUM];
8756 u16 tc_valid[HCLGE_MAX_TC_NUM];
8757 u16 tc_size[HCLGE_MAX_TC_NUM];
8762 kinfo->req_rss_size = new_tqps_num;
8764 ret = hclge_tm_vport_map_update(hdev);
8766 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8770 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8771 roundup_size = ilog2(roundup_size);
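/* Note: tc_size is programmed as log2 of the per-TC queue span, e.g. an
 * rss_size of 24 rounds up to 32 and is encoded as 5.
 */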
8772 /* Set the RSS TC mode according to the new RSS size */
8773 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8776 if (!(hdev->hw_tc_map & BIT(i)))
8780 tc_size[i] = roundup_size;
8781 tc_offset[i] = kinfo->rss_size * i;
8783 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8787 /* RSS indirection table has been configured by user */
8788 if (rxfh_configured)
8791 /* Reinitialize the RSS indirection table according to the new RSS size */
8792 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8796 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8797 rss_indir[i] = i % kinfo->rss_size;
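/* Note: this builds a round-robin default: with rss_size 8 the table
 * repeats 0,1,...,7 across all HCLGE_RSS_IND_TBL_SIZE entries, spreading
 * hash buckets evenly over the active queues.
 */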
8799 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8801 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8808 dev_info(&hdev->pdev->dev,
8809 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8810 cur_rss_size, kinfo->rss_size,
8811 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8816 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8817 u32 *regs_num_64_bit)
8819 struct hclge_desc desc;
8823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8824 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8826 dev_err(&hdev->pdev->dev,
8827 "Query register number cmd failed, ret = %d.\n", ret);
8831 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8832 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8834 total_num = *regs_num_32_bit + *regs_num_64_bit;
8841 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8844 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8846 struct hclge_desc *desc;
8847 u32 *reg_val = data;
8856 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
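/* Note: each descriptor returns HCLGE_32_BIT_REG_RTN_DATANUM (8) 32-bit
 * words, but the first descriptor reserves two words of metadata, hence
 * the "+ 2" here and the "- 2" in the copy loop below.
 */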
8857 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8861 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8862 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8864 dev_err(&hdev->pdev->dev,
8865 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8870 for (i = 0; i < cmd_num; i++) {
8872 desc_data = (__le32 *)(&desc[i].data[0]);
8873 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8875 desc_data = (__le32 *)(&desc[i]);
8876 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8878 for (k = 0; k < n; k++) {
8879 *reg_val++ = le32_to_cpu(*desc_data++);
8891 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8894 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8896 struct hclge_desc *desc;
8897 u64 *reg_val = data;
8906 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8907 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8911 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8912 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8914 dev_err(&hdev->pdev->dev,
8915 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8920 for (i = 0; i < cmd_num; i++) {
8922 desc_data = (__le64 *)(&desc[i].data[0]);
8923 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8925 desc_data = (__le64 *)(&desc[i]);
8926 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8928 for (k = 0; k < n; k++) {
8929 *reg_val++ = le64_to_cpu(*desc_data++);
8941 #define MAX_SEPARATE_NUM 4
8942 #define SEPARATOR_VALUE 0xFFFFFFFF
8943 #define REG_NUM_PER_LINE 4
8944 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
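/* Note: the ethtool register dump is laid out in REG_NUM_PER_LINE-word
 * lines: each directly-read block (cmdq, common, per-ring, per-vector) is
 * padded to a line boundary with SEPARATOR_VALUE markers, followed by the
 * firmware-queried 32-bit and 64-bit register arrays.
 */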
8946 static int hclge_get_regs_len(struct hnae3_handle *handle)
8948 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8949 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8950 struct hclge_vport *vport = hclge_get_vport(handle);
8951 struct hclge_dev *hdev = vport->back;
8952 u32 regs_num_32_bit, regs_num_64_bit;
8955 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8957 dev_err(&hdev->pdev->dev,
8958 "Get register number failed, ret = %d.\n", ret);
8962 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8963 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8964 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8965 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8967 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8968 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8969 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8972 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8975 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8976 struct hclge_vport *vport = hclge_get_vport(handle);
8977 struct hclge_dev *hdev = vport->back;
8978 u32 regs_num_32_bit, regs_num_64_bit;
8979 int i, j, reg_um, separator_num;
8983 *version = hdev->fw_version;
8985 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8987 dev_err(&hdev->pdev->dev,
8988 "Get register number failed, ret = %d.\n", ret);
8992 /* fetching per-PF register values from PF PCIe register space */
8993 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8994 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8995 for (i = 0; i < reg_um; i++)
8996 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8997 for (i = 0; i < separator_num; i++)
8998 *reg++ = SEPARATOR_VALUE;
9000 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9001 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9002 for (i = 0; i < reg_um; i++)
9003 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9004 for (i = 0; i < separator_num; i++)
9005 *reg++ = SEPARATOR_VALUE;
9007 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9008 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9009 for (j = 0; j < kinfo->num_tqps; j++) {
9010 for (i = 0; i < reg_um; i++)
9011 *reg++ = hclge_read_dev(&hdev->hw,
9012 ring_reg_addr_list[i] +
9014 for (i = 0; i < separator_num; i++)
9015 *reg++ = SEPARATOR_VALUE;
9018 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9019 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9020 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9021 for (i = 0; i < reg_um; i++)
9022 *reg++ = hclge_read_dev(&hdev->hw,
9023 tqp_intr_reg_addr_list[i] +
9025 for (i = 0; i < separator_num; i++)
9026 *reg++ = SEPARATOR_VALUE;
9029 /* fetching PF common register values from firmware */
9030 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9032 dev_err(&hdev->pdev->dev,
9033 "Get 32 bit register failed, ret = %d.\n", ret);
9037 reg += regs_num_32_bit;
9038 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9040 dev_err(&hdev->pdev->dev,
9041 "Get 64 bit register failed, ret = %d.\n", ret);
9044 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9046 struct hclge_set_led_state_cmd *req;
9047 struct hclge_desc desc;
9050 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9052 req = (struct hclge_set_led_state_cmd *)desc.data;
9053 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9054 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9056 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9058 dev_err(&hdev->pdev->dev,
9059 "Send set led state cmd error, ret =%d\n", ret);
9064 enum hclge_led_status {
9067 HCLGE_LED_NO_CHANGE = 0xFF,
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
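
/* Copy the supported/advertised link-mode bitmaps cached in the MAC
 * state into the caller-provided ethtool bitmaps.
 */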
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
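
/* hnae3 ops table for the PF: the framework and its client drivers
 * reach the hardware exclusively through these callbacks.
 */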
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
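
/* Module entry/exit: (un)register this algorithm with the hnae3
 * framework, which matches devices via ae_algo_pci_tbl above.
 */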
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);