// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
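
/* Register address lists below are grouped by hardware block (command
 * queue, miscellaneous/common, per-ring, per-vector interrupt); they are
 * presumably consumed by the driver's register-dump support, so the order
 * of entries defines the dump layout.
 */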
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
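
/* Read MAC statistics with the older ("defective") firmware interface:
 * opcode HCLGE_OPC_STATS_MAC returns a fixed chain of HCLGE_MAC_CMD_NUM
 * descriptors, and only the first descriptor carries a command header, so
 * it holds fewer 64-bit counters than each of the following descriptors.
 */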
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	/* one descriptor holds the header plus 3 registers, each following
	 * descriptor holds 4; round the remainder up.
	 */
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
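
/* TQP statistics are fetched one queue at a time: each
 * HCLGE_OPC_QUERY_RX_STATUS / HCLGE_OPC_QUERY_TX_STATUS command selects a
 * single queue index in data[0] and returns that queue's packet counter
 * in data[1], which is accumulated into the per-TQP software stats.
 */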
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp contributes one TX and one RX counter */
	return kinfo->num_tqps * (2);
}
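
/* The TQP stat strings below are emitted in the same order as
 * hclge_tqps_get_stats() fills the values: all TX queues first, then all
 * RX queues, one ETH_GSTRING_LEN slot per counter.
 */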
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports app (MAC) loopback
	 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check if PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
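
/* Sizes in the PF resource command are reported in units of
 * (1 << HCLGE_BUF_UNIT_S) bytes; a zero field means the firmware did not
 * report a value and the driver falls back to a built-in default.
 */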
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
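
/* The static configuration arrives in two command descriptors. Note how
 * the MAC address is reassembled below: the low 32 bits come from one
 * parameter word and the high 16 bits from another, and the double shift
 * "(mac_addr_tmp_high << 31) << 1" stands in for "<< 32", presumably to
 * sidestep shift-width pitfalls when the expression is evaluated with
 * narrower types.
 */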
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be fetched
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimum number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
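
/* hclge_configure() pulls the static config from the firmware and seeds
 * hdev defaults: the TM setup starts with a single PG and a single TC,
 * and hw_tc_map is built from tm_info.num_tc.
 */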
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Discontiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
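
/* Compute per-TC RX private buffers and watermarks. The "max" argument
 * selects the roomier watermark scheme; hclge_rx_buffer_calc() first
 * tries max = true, then max = false, and finally drops private buffers
 * when the packet buffer cannot fit the result.
 */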
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M can run at half duplex; force full duplex
	 * for every faster speed
	 */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
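
/* The firmware speed encoding used below mirrors hclge_parse_speed():
 * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M,
 * 7 = 100M.
 */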
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2235 duplex = hclge_check_speed_dup(duplex, speed);
2236 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2239 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2243 hdev->hw.mac.speed = speed;
2244 hdev->hw.mac.duplex = duplex;
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2252 struct hclge_vport *vport = hclge_get_vport(handle);
2253 struct hclge_dev *hdev = vport->back;
2255 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2260 struct hclge_config_auto_neg_cmd *req;
2261 struct hclge_desc desc;
2265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2267 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2281 struct hclge_vport *vport = hclge_get_vport(handle);
2282 struct hclge_dev *hdev = vport->back;
2284 if (!hdev->hw.mac.support_autoneg) {
2286 dev_err(&hdev->pdev->dev,
2287 "autoneg is not supported by current port\n");
2294 return hclge_set_autoneg_en(hdev, enable);
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2299 struct hclge_vport *vport = hclge_get_vport(handle);
2300 struct hclge_dev *hdev = vport->back;
2301 struct phy_device *phydev = hdev->hw.mac.phydev;
2304 return phydev->autoneg;
2306 return hdev->hw.mac.autoneg;
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2311 struct hclge_vport *vport = hclge_get_vport(handle);
2312 struct hclge_dev *hdev = vport->back;
2315 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2320 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2325 struct hclge_config_fec_cmd *req;
2326 struct hclge_desc desc;
2329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2331 req = (struct hclge_config_fec_cmd *)desc.data;
2332 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334 if (fec_mode & BIT(HNAE3_FEC_RS))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337 if (fec_mode & BIT(HNAE3_FEC_BASER))
2338 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2350 struct hclge_vport *vport = hclge_get_vport(handle);
2351 struct hclge_dev *hdev = vport->back;
2352 struct hclge_mac *mac = &hdev->hw.mac;
2355 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2360 ret = hclge_set_fec_hw(hdev, fec_mode);
2364 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2371 struct hclge_vport *vport = hclge_get_vport(handle);
2372 struct hclge_dev *hdev = vport->back;
2373 struct hclge_mac *mac = &hdev->hw.mac;
2376 *fec_ability = mac->fec_ability;
2378 *fec_mode = mac->fec_mode;
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2383 struct hclge_mac *mac = &hdev->hw.mac;
2386 hdev->support_sfp_query = true;
2387 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389 hdev->hw.mac.duplex);
2391 dev_err(&hdev->pdev->dev,
2392 "Config mac speed dup fail ret=%d\n", ret);
2398 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2401 dev_err(&hdev->pdev->dev,
2402 "Fec mode init fail, ret = %d\n", ret);
2407 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2409 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2413 ret = hclge_buffer_alloc(hdev);
2415 dev_err(&hdev->pdev->dev,
2416 "allocate buffer fail, ret=%d\n", ret);
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425 schedule_work(&hdev->mbx_service_task);
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2430 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431 schedule_work(&hdev->rst_service_task);
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2436 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439 (void)schedule_work(&hdev->service_task);
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2444 struct hclge_link_status_cmd *req;
2445 struct hclge_desc desc;
2449 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2452 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2457 req = (struct hclge_link_status_cmd *)desc.data;
2458 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2460 return !!link_status;
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2468 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2471 mac_state = hclge_get_mac_link_status(hdev);
2473 if (hdev->hw.mac.phydev) {
2474 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475 link_stat = mac_state &
2476 hdev->hw.mac.phydev->link;
2481 link_stat = mac_state;
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2489 struct hnae3_client *rclient = hdev->roce_client;
2490 struct hnae3_client *client = hdev->nic_client;
2491 struct hnae3_handle *rhandle;
2492 struct hnae3_handle *handle;
2498 state = hclge_get_mac_phy_link(hdev);
2499 if (state != hdev->hw.mac.link) {
2500 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501 handle = &hdev->vport[i].nic;
2502 client->ops->link_status_change(handle, state);
2503 hclge_config_mac_tnl_int(hdev, state);
2504 rhandle = &hdev->vport[i].roce;
2505 if (rclient && rclient->ops->link_status_change)
2506 rclient->ops->link_status_change(rhandle,
2509 hdev->hw.mac.link = state;
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2515 /* update fec ability by speed */
2516 hclge_convert_setting_fec(mac);
2518 /* firmware can not identify the backplane type; the media type
2519 * read from the configuration can help to deal with it
2521 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525 mac->module_type = HNAE3_MODULE_TYPE_TP;
2527 if (mac->support_autoneg) {
2528 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529 linkmode_copy(mac->advertising, mac->supported);
2531 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2533 linkmode_zero(mac->advertising);
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2539 struct hclge_sfp_info_cmd *resp = NULL;
2540 struct hclge_desc desc;
2543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544 resp = (struct hclge_sfp_info_cmd *)desc.data;
2545 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546 if (ret == -EOPNOTSUPP) {
2547 dev_warn(&hdev->pdev->dev,
2548 "IMP do not support get SFP speed %d\n", ret);
2551 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2555 *speed = le32_to_cpu(resp->speed);
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2562 struct hclge_sfp_info_cmd *resp;
2563 struct hclge_desc desc;
2566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567 resp = (struct hclge_sfp_info_cmd *)desc.data;
2569 resp->query_type = QUERY_ACTIVE_SPEED;
2571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572 if (ret == -EOPNOTSUPP) {
2573 dev_warn(&hdev->pdev->dev,
2574 "IMP does not support get SFP info %d\n", ret);
2577 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2581 mac->speed = le32_to_cpu(resp->speed);
2582 /* if resp->speed_ability is 0, it means the firmware is an old
2583 * version, so do not update these params
2585 if (resp->speed_ability) {
2586 mac->module_type = le32_to_cpu(resp->module_type);
2587 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588 mac->autoneg = resp->autoneg;
2589 mac->support_autoneg = resp->autoneg_ability;
2590 if (!resp->active_fec)
2593 mac->fec_mode = BIT(resp->active_fec);
2595 mac->speed_type = QUERY_SFP_SPEED;
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2603 struct hclge_mac *mac = &hdev->hw.mac;
2604 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2607 /* get the port info from SFP cmd if not copper port */
2608 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2611 /* if IMP does not support getting SFP/qSFP info, return directly */
2612 if (!hdev->support_sfp_query)
2615 if (hdev->pdev->revision >= 0x21)
2616 ret = hclge_get_sfp_info(hdev, mac);
2618 ret = hclge_get_sfp_speed(hdev, &speed);
2620 if (ret == -EOPNOTSUPP) {
2621 hdev->support_sfp_query = false;
2627 if (hdev->pdev->revision >= 0x21) {
2628 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629 hclge_update_port_capability(mac);
2632 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2635 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636 return 0; /* do nothing if no SFP */
2638 /* must config full duplex for SFP */
2639 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2643 static int hclge_get_status(struct hnae3_handle *handle)
2645 struct hclge_vport *vport = hclge_get_vport(handle);
2646 struct hclge_dev *hdev = vport->back;
2648 hclge_update_link_status(hdev);
2650 return hdev->hw.mac.link;
2653 static void hclge_service_timer(struct timer_list *t)
2655 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2657 mod_timer(&hdev->service_timer, jiffies + HZ);
2658 hdev->hw_stats.stats_timer++;
2659 hdev->fd_arfs_expire_timer++;
2660 hclge_task_schedule(hdev);
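/* Note (illustrative): the timer re-arms itself every HZ jiffies, so the
 * stats and aRFS expiry counters above tick once per second; they are
 * consumed by hclge_service_task() once they reach their intervals.
 */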
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2665 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2667 /* Flush memory before next watchdog */
2668 smp_mb__before_atomic();
2669 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2674 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2676 /* fetch the events from their corresponding regs */
2677 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679 msix_src_reg = hclge_read_dev(&hdev->hw,
2680 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2682 /* Assumption: If by any chance reset and mailbox events are reported
2683 * together then we will only process reset event in this go and will
2684 * defer the processing of the mailbox events. Since we would not have
2685 * cleared the RX CMDQ event this time, we would receive another
2686 * interrupt from H/W just for the mailbox.
2689 /* check for vector0 reset event sources */
2690 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695 hdev->rst_stats.imp_rst_cnt++;
2696 return HCLGE_VECTOR0_EVENT_RST;
2699 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704 hdev->rst_stats.global_rst_cnt++;
2705 return HCLGE_VECTOR0_EVENT_RST;
2708 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713 hdev->rst_stats.core_rst_cnt++;
2714 return HCLGE_VECTOR0_EVENT_RST;
2717 /* check for vector0 msix event source */
2718 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2721 return HCLGE_VECTOR0_EVENT_ERR;
2724 /* check for vector0 mailbox(=CMDQ RX) event source */
2725 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727 *clearval = cmdq_src_reg;
2728 return HCLGE_VECTOR0_EVENT_MBX;
2731 /* print other vector0 event source */
2732 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733 cmdq_src_reg, msix_src_reg);
2734 return HCLGE_VECTOR0_EVENT_OTHER;
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2740 switch (event_type) {
2741 case HCLGE_VECTOR0_EVENT_RST:
2742 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2744 case HCLGE_VECTOR0_EVENT_MBX:
2745 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2754 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2763 writel(enable ? 1 : 0, vector->addr);
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2768 struct hclge_dev *hdev = data;
2772 hclge_enable_vector(&hdev->misc_vector, false);
2773 event_cause = hclge_check_event_cause(hdev, &clearval);
2775 /* vector 0 interrupt is shared with reset and mailbox source events. */
2776 switch (event_cause) {
2777 case HCLGE_VECTOR0_EVENT_ERR:
2778 /* we do not know what type of reset is required now. This could
2779 * only be decided after we fetch the type of errors which
2780 * caused this event. Therefore, we will do below for now:
2781 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782 * have a deferred type of reset to be used.
2783 * 2. Schedule the reset service task.
2784 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2785 * will fetch the correct type of reset. This would be done
2786 * by first decoding the types of errors.
2788 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2790 case HCLGE_VECTOR0_EVENT_RST:
2791 hclge_reset_task_schedule(hdev);
2793 case HCLGE_VECTOR0_EVENT_MBX:
2794 /* If we are here then,
2795 * 1. Either we are not handling any mbx task and we are not
2796 *    scheduled as well, OR
2798 * 2. We could be handling a mbx task but nothing more is
2799 *    scheduled.
2800 * In both cases, we should schedule mbx task as there are more
2801 * mbx messages reported by this interrupt.
2803 hclge_mbx_task_schedule(hdev);
2806 dev_warn(&hdev->pdev->dev,
2807 "received unknown or unhandled event of vector0\n");
2811 /* clear the source of the interrupt if it is not caused by reset */
2812 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813 hclge_clear_event_cause(hdev, event_cause, clearval);
2814 hclge_enable_vector(&hdev->misc_vector, true);
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2822 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823 dev_warn(&hdev->pdev->dev,
2824 "vector(vector_id %d) has been freed.\n", vector_id);
2828 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829 hdev->num_msi_left += 1;
2830 hdev->num_msi_used -= 1;
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2835 struct hclge_misc_vector *vector = &hdev->misc_vector;
2837 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2839 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840 hdev->vector_status[0] = 0;
2842 hdev->num_msi_left -= 1;
2843 hdev->num_msi_used += 1;
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2850 hclge_get_misc_vector(hdev);
2852 /* this IRQ would be explicitly freed at teardown */
2853 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854 0, "hclge_misc", hdev);
2856 hclge_free_vector(hdev, 0);
2857 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858 hdev->misc_vector.vector_irq);
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2866 free_irq(hdev->misc_vector.vector_irq, hdev);
2867 hclge_free_vector(hdev, 0);
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871 enum hnae3_reset_notify_type type)
2873 struct hnae3_client *client = hdev->nic_client;
2876 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2880 if (!client->ops->reset_notify)
2883 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2884 struct hnae3_handle *handle = &hdev->vport[i].nic;
2887 ret = client->ops->reset_notify(handle, type);
2889 dev_err(&hdev->pdev->dev,
2890 "notify nic client failed %d(%d)\n", type, ret);
2898 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2899 enum hnae3_reset_notify_type type)
2901 struct hnae3_client *client = hdev->roce_client;
2908 if (!client->ops->reset_notify)
2911 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2912 struct hnae3_handle *handle = &hdev->vport[i].roce;
2914 ret = client->ops->reset_notify(handle, type);
2916 dev_err(&hdev->pdev->dev,
2917 "notify roce client failed %d(%d)",
2926 static int hclge_reset_wait(struct hclge_dev *hdev)
2928 #define HCLGE_RESET_WAIT_MS 100
2929 #define HCLGE_RESET_WAIT_CNT 200
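/* Worked figure: the poll loop below gives up after HCLGE_RESET_WAIT_CNT
 * (200) iterations of 100 ms, i.e. 20 s in the worst case.
 */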
2930 u32 val, reg, reg_bit;
2933 switch (hdev->reset_type) {
2934 case HNAE3_IMP_RESET:
2935 reg = HCLGE_GLOBAL_RESET_REG;
2936 reg_bit = HCLGE_IMP_RESET_BIT;
2938 case HNAE3_GLOBAL_RESET:
2939 reg = HCLGE_GLOBAL_RESET_REG;
2940 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2942 case HNAE3_CORE_RESET:
2943 reg = HCLGE_GLOBAL_RESET_REG;
2944 reg_bit = HCLGE_CORE_RESET_BIT;
2946 case HNAE3_FUNC_RESET:
2947 reg = HCLGE_FUN_RST_ING;
2948 reg_bit = HCLGE_FUN_RST_ING_B;
2950 case HNAE3_FLR_RESET:
2953 dev_err(&hdev->pdev->dev,
2954 "Wait for unsupported reset type: %d\n",
2959 if (hdev->reset_type == HNAE3_FLR_RESET) {
2960 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2961 cnt++ < HCLGE_RESET_WAIT_CNT)
2962 msleep(HCLGE_RESET_WAIT_MS);
2964 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2965 dev_err(&hdev->pdev->dev,
2966 "flr wait timeout: %d\n", cnt);
2973 val = hclge_read_dev(&hdev->hw, reg);
2974 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2975 msleep(HCLGE_RESET_WAIT_MS);
2976 val = hclge_read_dev(&hdev->hw, reg);
2980 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2981 dev_warn(&hdev->pdev->dev,
2982 "Wait for reset timeout: %d\n", hdev->reset_type);
2989 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2991 struct hclge_vf_rst_cmd *req;
2992 struct hclge_desc desc;
2994 req = (struct hclge_vf_rst_cmd *)desc.data;
2995 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2996 req->dest_vfid = func_id;
3001 return hclge_cmd_send(&hdev->hw, &desc, 1);
3004 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3008 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3009 struct hclge_vport *vport = &hdev->vport[i];
3012 /* Send cmd to set/clear VF's FUNC_RST_ING */
3013 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3015 dev_err(&hdev->pdev->dev,
3016 "set vf(%d) rst failed %d!\n",
3017 vport->vport_id, ret);
3021 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3024 /* Inform VF to process the reset.
3025 * hclge_inform_reset_assert_to_vf may fail if VF
3026 * driver is not loaded.
3028 ret = hclge_inform_reset_assert_to_vf(vport);
3030 dev_warn(&hdev->pdev->dev,
3031 "inform reset to vf(%d) failed %d!\n",
3032 vport->vport_id, ret);
3038 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3040 struct hclge_desc desc;
3041 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3044 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3045 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3046 req->fun_reset_vfid = func_id;
3048 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3050 dev_err(&hdev->pdev->dev,
3051 "send function reset cmd fail, status =%d\n", ret);
3056 static void hclge_do_reset(struct hclge_dev *hdev)
3058 struct hnae3_handle *handle = &hdev->vport[0].nic;
3059 struct pci_dev *pdev = hdev->pdev;
3062 if (hclge_get_hw_reset_stat(handle)) {
3063 dev_info(&pdev->dev, "Hardware reset not finish\n");
3064 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3065 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3066 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3070 switch (hdev->reset_type) {
3071 case HNAE3_GLOBAL_RESET:
3072 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3073 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3074 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3075 dev_info(&pdev->dev, "Global Reset requested\n");
3077 case HNAE3_CORE_RESET:
3078 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3079 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3080 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3081 dev_info(&pdev->dev, "Core Reset requested\n");
3083 case HNAE3_FUNC_RESET:
3084 dev_info(&pdev->dev, "PF Reset requested\n");
3085 /* schedule again to check later */
3086 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3087 hclge_reset_task_schedule(hdev);
3089 case HNAE3_FLR_RESET:
3090 dev_info(&pdev->dev, "FLR requested\n");
3091 /* schedule again to check later */
3092 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3093 hclge_reset_task_schedule(hdev);
3096 dev_warn(&pdev->dev,
3097 "Unsupported reset type: %d\n", hdev->reset_type);
3102 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3103 unsigned long *addr)
3105 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3107 /* first, resolve any unknown reset type to the known type(s) */
3108 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3109 /* we will intentionally ignore any errors from this function
3110 * as we will end up in *some* reset request in any case
3112 hclge_handle_hw_msix_error(hdev, addr);
3113 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3114 /* We deferred the clearing of the error event which caused the
3115 * interrupt since it was not possible to do that in
3116 * interrupt context (and this is the reason we introduced
3117 * new UNKNOWN reset type). Now, the errors have been
3118 * handled and cleared in hardware we can safely enable
3119 * interrupts. This is an exception to the norm.
3121 hclge_enable_vector(&hdev->misc_vector, true);
3124 /* return the highest priority reset level amongst all */
3125 if (test_bit(HNAE3_IMP_RESET, addr)) {
3126 rst_level = HNAE3_IMP_RESET;
3127 clear_bit(HNAE3_IMP_RESET, addr);
3128 clear_bit(HNAE3_GLOBAL_RESET, addr);
3129 clear_bit(HNAE3_CORE_RESET, addr);
3130 clear_bit(HNAE3_FUNC_RESET, addr);
3131 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3132 rst_level = HNAE3_GLOBAL_RESET;
3133 clear_bit(HNAE3_GLOBAL_RESET, addr);
3134 clear_bit(HNAE3_CORE_RESET, addr);
3135 clear_bit(HNAE3_FUNC_RESET, addr);
3136 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3137 rst_level = HNAE3_CORE_RESET;
3138 clear_bit(HNAE3_CORE_RESET, addr);
3139 clear_bit(HNAE3_FUNC_RESET, addr);
3140 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3141 rst_level = HNAE3_FUNC_RESET;
3142 clear_bit(HNAE3_FUNC_RESET, addr);
3143 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3144 rst_level = HNAE3_FLR_RESET;
3145 clear_bit(HNAE3_FLR_RESET, addr);
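/* e.g. if both HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET were pending,
 * HNAE3_GLOBAL_RESET is returned and the lower-priority bits are
 * cleared, since the bigger reset covers them.
 */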
3148 if (hdev->reset_type != HNAE3_NONE_RESET &&
3149 rst_level < hdev->reset_type)
3150 return HNAE3_NONE_RESET;
3155 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3159 switch (hdev->reset_type) {
3160 case HNAE3_IMP_RESET:
3161 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3163 case HNAE3_GLOBAL_RESET:
3164 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3166 case HNAE3_CORE_RESET:
3167 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3176 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3177 hclge_enable_vector(&hdev->misc_vector, true);
3180 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3184 switch (hdev->reset_type) {
3185 case HNAE3_FUNC_RESET:
3187 case HNAE3_FLR_RESET:
3188 ret = hclge_set_all_vf_rst(hdev, true);
3197 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3202 switch (hdev->reset_type) {
3203 case HNAE3_FUNC_RESET:
3204 /* There is no mechanism for the PF to know if the VF has stopped IO;
3205 * for now, just wait 100 ms for the VF to stop IO
3208 ret = hclge_func_reset_cmd(hdev, 0);
3210 dev_err(&hdev->pdev->dev,
3211 "asserting function reset fail %d!\n", ret);
3215 /* After performing PF reset, it is not necessary to do the
3216 * mailbox handling or send any command to firmware, because
3217 * any mailbox handling or command to firmware is only valid
3218 * after hclge_cmd_init is called.
3220 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3221 hdev->rst_stats.pf_rst_cnt++;
3223 case HNAE3_FLR_RESET:
3224 /* There is no mechanism for the PF to know if the VF has stopped IO;
3225 * for now, just wait 100 ms for the VF to stop IO
3228 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3229 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3230 hdev->rst_stats.flr_rst_cnt++;
3232 case HNAE3_IMP_RESET:
3233 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3234 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3235 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3241 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3246 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3248 #define MAX_RESET_FAIL_CNT 5
3249 #define RESET_UPGRADE_DELAY_SEC 10
3251 if (hdev->reset_pending) {
3252 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3253 hdev->reset_pending);
3255 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3256 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3257 BIT(HCLGE_IMP_RESET_BIT))) {
3258 dev_info(&hdev->pdev->dev,
3259 "reset failed because IMP Reset is pending\n");
3260 hclge_clear_reset_cause(hdev);
3262 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3263 hdev->reset_fail_cnt++;
3265 set_bit(hdev->reset_type, &hdev->reset_pending);
3266 dev_info(&hdev->pdev->dev,
3267 "re-schedule to wait for hw reset done\n");
3271 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3272 hclge_clear_reset_cause(hdev);
3273 mod_timer(&hdev->reset_timer,
3274 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3279 hclge_clear_reset_cause(hdev);
3280 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3284 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3288 switch (hdev->reset_type) {
3289 case HNAE3_FUNC_RESET:
3291 case HNAE3_FLR_RESET:
3292 ret = hclge_set_all_vf_rst(hdev, false);
3301 static void hclge_reset(struct hclge_dev *hdev)
3303 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3304 bool is_timeout = false;
3307 /* Initialize ae_dev reset status as well, in case enet layer wants to
3308 * know if device is undergoing reset
3310 ae_dev->reset_type = hdev->reset_type;
3311 hdev->rst_stats.reset_cnt++;
3312 /* perform reset of the stack & ae device for a client */
3313 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3317 ret = hclge_reset_prepare_down(hdev);
3322 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3324 goto err_reset_lock;
3328 ret = hclge_reset_prepare_wait(hdev);
3332 if (hclge_reset_wait(hdev)) {
3337 hdev->rst_stats.hw_reset_done_cnt++;
3339 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3344 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3346 goto err_reset_lock;
3348 ret = hclge_reset_ae_dev(hdev->ae_dev);
3350 goto err_reset_lock;
3352 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3354 goto err_reset_lock;
3356 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3358 goto err_reset_lock;
3360 hclge_clear_reset_cause(hdev);
3362 ret = hclge_reset_prepare_up(hdev);
3364 goto err_reset_lock;
3366 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3368 goto err_reset_lock;
3372 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3376 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3380 hdev->last_reset_time = jiffies;
3381 hdev->reset_fail_cnt = 0;
3382 hdev->rst_stats.reset_done_cnt++;
3383 ae_dev->reset_type = HNAE3_NONE_RESET;
3384 del_timer(&hdev->reset_timer);
3391 if (hclge_reset_err_handle(hdev, is_timeout))
3392 hclge_reset_task_schedule(hdev);
3395 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3397 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3398 struct hclge_dev *hdev = ae_dev->priv;
3400 /* We might end up getting called broadly because of the 2 cases below:
3401 * 1. Recoverable error was conveyed through APEI and only way to bring
3402 * normalcy is to reset.
3403 * 2. A new reset request from the stack due to timeout
3405 * For the first case, the error event might not have an ae handle available.
3406 * check if this is a new reset request and we are not here just because
3407 * last reset attempt did not succeed and watchdog hit us again. We will
3408 * know this if last reset request did not occur very recently (watchdog
3409 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3410 * In case of new request we reset the "reset level" to PF reset.
3411 * And if it is a repeat reset request of the most recent one then we
3412 * want to make sure we throttle the reset request. Therefore, we will
3413 * not allow it again before 3*HZ has elapsed.
3416 handle = &hdev->vport[0].nic;
3418 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3420 else if (hdev->default_reset_request)
3422 hclge_get_reset_level(hdev,
3423 &hdev->default_reset_request);
3424 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3425 hdev->reset_level = HNAE3_FUNC_RESET;
3427 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3430 /* request reset & schedule reset task */
3431 set_bit(hdev->reset_level, &hdev->reset_request);
3432 hclge_reset_task_schedule(hdev);
3434 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3435 hdev->reset_level++;
3438 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3439 enum hnae3_reset_type rst_type)
3441 struct hclge_dev *hdev = ae_dev->priv;
3443 set_bit(rst_type, &hdev->default_reset_request);
3446 static void hclge_reset_timer(struct timer_list *t)
3448 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3450 dev_info(&hdev->pdev->dev,
3451 "triggering global reset in reset timer\n");
3452 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3453 hclge_reset_event(hdev->pdev, NULL);
3456 static void hclge_reset_subtask(struct hclge_dev *hdev)
3458 /* check if there is any ongoing reset in the hardware. This status can
3459 * be checked from reset_pending. If there is then, we need to wait for
3460 * hardware to complete reset.
3461 * a. If we are able to figure out in reasonable time that hardware
3462 * has fully reset, then we can proceed with the driver and client
3464 * b. else, we can come back later to check this status so re-sched
3467 hdev->last_reset_time = jiffies;
3468 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3469 if (hdev->reset_type != HNAE3_NONE_RESET)
3472 /* check if we got any *new* reset requests to be honored */
3473 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3474 if (hdev->reset_type != HNAE3_NONE_RESET)
3475 hclge_do_reset(hdev);
3477 hdev->reset_type = HNAE3_NONE_RESET;
3480 static void hclge_reset_service_task(struct work_struct *work)
3482 struct hclge_dev *hdev =
3483 container_of(work, struct hclge_dev, rst_service_task);
3485 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3488 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3490 hclge_reset_subtask(hdev);
3492 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3495 static void hclge_mailbox_service_task(struct work_struct *work)
3497 struct hclge_dev *hdev =
3498 container_of(work, struct hclge_dev, mbx_service_task);
3500 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3503 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3505 hclge_mbx_handler(hdev);
3507 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3510 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3514 /* start from vport 1, since the PF (vport 0) is always alive */
3515 for (i = 1; i < hdev->num_alloc_vport; i++) {
3516 struct hclge_vport *vport = &hdev->vport[i];
3518 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3519 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3521 /* If the vf is not alive, set its mps to the default value */
3522 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3523 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3527 static void hclge_service_task(struct work_struct *work)
3529 struct hclge_dev *hdev =
3530 container_of(work, struct hclge_dev, service_task);
3532 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3533 hclge_update_stats_for_all(hdev);
3534 hdev->hw_stats.stats_timer = 0;
3537 hclge_update_port_info(hdev);
3538 hclge_update_link_status(hdev);
3539 hclge_update_vport_alive(hdev);
3540 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3541 hclge_rfs_filter_expire(hdev);
3542 hdev->fd_arfs_expire_timer = 0;
3544 hclge_service_complete(hdev);
3547 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3549 /* VF handle has no client */
3550 if (!handle->client)
3551 return container_of(handle, struct hclge_vport, nic);
3552 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3553 return container_of(handle, struct hclge_vport, roce);
3555 return container_of(handle, struct hclge_vport, nic);
3558 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3559 struct hnae3_vector_info *vector_info)
3561 struct hclge_vport *vport = hclge_get_vport(handle);
3562 struct hnae3_vector_info *vector = vector_info;
3563 struct hclge_dev *hdev = vport->back;
3567 vector_num = min(hdev->num_msi_left, vector_num);
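/* Illustrative note: MSI-X entry 0 is reserved for the misc vector (see
 * hclge_get_misc_vector()), so the scan below starts at index 1. Each
 * hit records the owning vport and irq number, and the ring doorbell
 * address is derived from slot (i - 1) plus a per-vport offset.
 */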
3569 for (j = 0; j < vector_num; j++) {
3570 for (i = 1; i < hdev->num_msi; i++) {
3571 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3572 vector->vector = pci_irq_vector(hdev->pdev, i);
3573 vector->io_addr = hdev->hw.io_base +
3574 HCLGE_VECTOR_REG_BASE +
3575 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3577 HCLGE_VECTOR_VF_OFFSET;
3578 hdev->vector_status[i] = vport->vport_id;
3579 hdev->vector_irq[i] = vector->vector;
3588 hdev->num_msi_left -= alloc;
3589 hdev->num_msi_used += alloc;
3594 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3598 for (i = 0; i < hdev->num_msi; i++)
3599 if (vector == hdev->vector_irq[i])
3605 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3607 struct hclge_vport *vport = hclge_get_vport(handle);
3608 struct hclge_dev *hdev = vport->back;
3611 vector_id = hclge_get_vector_index(hdev, vector);
3612 if (vector_id < 0) {
3613 dev_err(&hdev->pdev->dev,
3614 "Get vector index fail. vector_id =%d\n", vector_id);
3618 hclge_free_vector(hdev, vector_id);
3623 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3625 return HCLGE_RSS_KEY_SIZE;
3628 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3630 return HCLGE_RSS_IND_TBL_SIZE;
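/* Worked example (hedged; assumes HCLGE_RSS_KEY_SIZE == 40 and
 * HCLGE_RSS_HASH_KEY_NUM == 16, the values this driver family uses):
 * hclge_set_rss_algo_key() below pushes the key in three descriptors of
 * 16, 16 and 40 - 2 * 16 = 8 bytes, each tagged with its key_offset.
 */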
3633 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3634 const u8 hfunc, const u8 *key)
3636 struct hclge_rss_config_cmd *req;
3637 struct hclge_desc desc;
3642 req = (struct hclge_rss_config_cmd *)desc.data;
3644 for (key_offset = 0; key_offset < 3; key_offset++) {
3645 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3648 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3649 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3651 if (key_offset == 2)
3653 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3655 key_size = HCLGE_RSS_HASH_KEY_NUM;
3657 memcpy(req->hash_key,
3658 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3660 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3662 dev_err(&hdev->pdev->dev,
3663 "Configure RSS config fail, status = %d\n",
3671 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3673 struct hclge_rss_indirection_table_cmd *req;
3674 struct hclge_desc desc;
3678 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
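/* Illustrative note: the indirection table is flushed in
 * HCLGE_RSS_CFG_TBL_NUM commands of HCLGE_RSS_CFG_TBL_SIZE entries each;
 * start_table_index records where every chunk lands.
 */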
3680 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3681 hclge_cmd_setup_basic_desc
3682 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3684 req->start_table_index =
3685 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3686 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3688 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3689 req->rss_result[j] =
3690 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3692 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3694 dev_err(&hdev->pdev->dev,
3695 "Configure rss indir table fail,status = %d\n",
3703 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3704 u16 *tc_size, u16 *tc_offset)
3706 struct hclge_rss_tc_mode_cmd *req;
3707 struct hclge_desc desc;
3711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3712 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3714 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3717 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3718 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3719 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3720 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3721 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3723 req->rss_tc_mode[i] = cpu_to_le16(mode);
3726 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3728 dev_err(&hdev->pdev->dev,
3729 "Configure rss tc mode fail, status = %d\n", ret);
3734 static void hclge_get_rss_type(struct hclge_vport *vport)
3736 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3737 vport->rss_tuple_sets.ipv4_udp_en ||
3738 vport->rss_tuple_sets.ipv4_sctp_en ||
3739 vport->rss_tuple_sets.ipv6_tcp_en ||
3740 vport->rss_tuple_sets.ipv6_udp_en ||
3741 vport->rss_tuple_sets.ipv6_sctp_en)
3742 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3743 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3744 vport->rss_tuple_sets.ipv6_fragment_en)
3745 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3747 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3750 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3752 struct hclge_rss_input_tuple_cmd *req;
3753 struct hclge_desc desc;
3756 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3758 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3760 /* Get the tuple cfg from the PF */
3761 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3762 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3763 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3764 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3765 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3766 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3767 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3768 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3769 hclge_get_rss_type(&hdev->vport[0]);
3770 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3772 dev_err(&hdev->pdev->dev,
3773 "Configure rss input fail, status = %d\n", ret);
3777 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3780 struct hclge_vport *vport = hclge_get_vport(handle);
3783 /* Get hash algorithm */
3785 switch (vport->rss_algo) {
3786 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3787 *hfunc = ETH_RSS_HASH_TOP;
3789 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3790 *hfunc = ETH_RSS_HASH_XOR;
3793 *hfunc = ETH_RSS_HASH_UNKNOWN;
3798 /* Get the RSS Key required by the user */
3800 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3802 /* Get indirect table */
3804 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3805 indir[i] = vport->rss_indirection_tbl[i];
3810 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3811 const u8 *key, const u8 hfunc)
3813 struct hclge_vport *vport = hclge_get_vport(handle);
3814 struct hclge_dev *hdev = vport->back;
3818 /* Set the RSS Hash Key if specified by the user */
3821 case ETH_RSS_HASH_TOP:
3822 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3824 case ETH_RSS_HASH_XOR:
3825 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3827 case ETH_RSS_HASH_NO_CHANGE:
3828 hash_algo = vport->rss_algo;
3834 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3838 /* Update the shadow RSS key with the user specified key */
3839 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3840 vport->rss_algo = hash_algo;
3843 /* Update the shadow RSS table with user specified qids */
3844 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3845 vport->rss_indirection_tbl[i] = indir[i];
3847 /* Update the hardware */
3848 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3851 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3853 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3855 if (nfc->data & RXH_L4_B_2_3)
3856 hash_sets |= HCLGE_D_PORT_BIT;
3858 hash_sets &= ~HCLGE_D_PORT_BIT;
3860 if (nfc->data & RXH_IP_SRC)
3861 hash_sets |= HCLGE_S_IP_BIT;
3863 hash_sets &= ~HCLGE_S_IP_BIT;
3865 if (nfc->data & RXH_IP_DST)
3866 hash_sets |= HCLGE_D_IP_BIT;
3868 hash_sets &= ~HCLGE_D_IP_BIT;
3870 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3871 hash_sets |= HCLGE_V_TAG_BIT;
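/* Example (derived from the checks above): a TCP_V4_FLOW request with
 * data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 yields
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT.
 */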
3876 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3877 struct ethtool_rxnfc *nfc)
3879 struct hclge_vport *vport = hclge_get_vport(handle);
3880 struct hclge_dev *hdev = vport->back;
3881 struct hclge_rss_input_tuple_cmd *req;
3882 struct hclge_desc desc;
3886 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3887 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3890 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3891 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3893 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3894 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3895 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3896 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3897 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3898 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3899 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3900 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3902 tuple_sets = hclge_get_rss_hash_bits(nfc);
3903 switch (nfc->flow_type) {
3905 req->ipv4_tcp_en = tuple_sets;
3908 req->ipv6_tcp_en = tuple_sets;
3911 req->ipv4_udp_en = tuple_sets;
3914 req->ipv6_udp_en = tuple_sets;
3917 req->ipv4_sctp_en = tuple_sets;
3920 if ((nfc->data & RXH_L4_B_0_1) ||
3921 (nfc->data & RXH_L4_B_2_3))
3924 req->ipv6_sctp_en = tuple_sets;
3927 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3930 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3936 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3938 dev_err(&hdev->pdev->dev,
3939 "Set rss tuple fail, status = %d\n", ret);
3943 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3944 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3945 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3946 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3947 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3948 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3949 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3950 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3951 hclge_get_rss_type(vport);
3955 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3956 struct ethtool_rxnfc *nfc)
3958 struct hclge_vport *vport = hclge_get_vport(handle);
3963 switch (nfc->flow_type) {
3965 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3968 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3971 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3974 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3977 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3980 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3984 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3993 if (tuple_sets & HCLGE_D_PORT_BIT)
3994 nfc->data |= RXH_L4_B_2_3;
3995 if (tuple_sets & HCLGE_S_PORT_BIT)
3996 nfc->data |= RXH_L4_B_0_1;
3997 if (tuple_sets & HCLGE_D_IP_BIT)
3998 nfc->data |= RXH_IP_DST;
3999 if (tuple_sets & HCLGE_S_IP_BIT)
4000 nfc->data |= RXH_IP_SRC;
4005 static int hclge_get_tc_size(struct hnae3_handle *handle)
4007 struct hclge_vport *vport = hclge_get_vport(handle);
4008 struct hclge_dev *hdev = vport->back;
4010 return hdev->rss_size_max;
4013 int hclge_rss_init_hw(struct hclge_dev *hdev)
4015 struct hclge_vport *vport = hdev->vport;
4016 u8 *rss_indir = vport[0].rss_indirection_tbl;
4017 u16 rss_size = vport[0].alloc_rss_size;
4018 u8 *key = vport[0].rss_hash_key;
4019 u8 hfunc = vport[0].rss_algo;
4020 u16 tc_offset[HCLGE_MAX_TC_NUM];
4021 u16 tc_valid[HCLGE_MAX_TC_NUM];
4022 u16 tc_size[HCLGE_MAX_TC_NUM];
4026 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4030 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4034 ret = hclge_set_rss_input_tuple(hdev);
4038 /* Each TC has the same queue size, and the tc_size set to hardware is
4039 * the log2 of the roundup power of two of rss_size; the actual queue
4040 * size is limited by the indirection table.
4042 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4043 dev_err(&hdev->pdev->dev,
4044 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4049 roundup_size = roundup_pow_of_two(rss_size);
4050 roundup_size = ilog2(roundup_size);
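/* e.g. rss_size = 24 gives roundup_pow_of_two(24) = 32, so the tc_size
 * written to hardware is ilog2(32) = 5, i.e. 2^5 queues per TC.
 */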
4052 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4055 if (!(hdev->hw_tc_map & BIT(i)))
4059 tc_size[i] = roundup_size;
4060 tc_offset[i] = rss_size * i;
4063 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4066 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4068 struct hclge_vport *vport = hdev->vport;
4071 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4072 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4073 vport[j].rss_indirection_tbl[i] =
4074 i % vport[j].alloc_rss_size;
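/* Example (illustrative): with alloc_rss_size == 4 the table holds the
 * repeating pattern 0, 1, 2, 3, 0, 1, ... spreading flows evenly over
 * the four allocated RSS queues.
 */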
4078 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4080 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4081 struct hclge_vport *vport = hdev->vport;
4083 if (hdev->pdev->revision >= 0x21)
4084 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4086 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4087 vport[i].rss_tuple_sets.ipv4_tcp_en =
4088 HCLGE_RSS_INPUT_TUPLE_OTHER;
4089 vport[i].rss_tuple_sets.ipv4_udp_en =
4090 HCLGE_RSS_INPUT_TUPLE_OTHER;
4091 vport[i].rss_tuple_sets.ipv4_sctp_en =
4092 HCLGE_RSS_INPUT_TUPLE_SCTP;
4093 vport[i].rss_tuple_sets.ipv4_fragment_en =
4094 HCLGE_RSS_INPUT_TUPLE_OTHER;
4095 vport[i].rss_tuple_sets.ipv6_tcp_en =
4096 HCLGE_RSS_INPUT_TUPLE_OTHER;
4097 vport[i].rss_tuple_sets.ipv6_udp_en =
4098 HCLGE_RSS_INPUT_TUPLE_OTHER;
4099 vport[i].rss_tuple_sets.ipv6_sctp_en =
4100 HCLGE_RSS_INPUT_TUPLE_SCTP;
4101 vport[i].rss_tuple_sets.ipv6_fragment_en =
4102 HCLGE_RSS_INPUT_TUPLE_OTHER;
4104 vport[i].rss_algo = rss_algo;
4106 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4107 HCLGE_RSS_KEY_SIZE);
4110 hclge_rss_indir_init_cfg(hdev);
4113 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4114 int vector_id, bool en,
4115 struct hnae3_ring_chain_node *ring_chain)
4117 struct hclge_dev *hdev = vport->back;
4118 struct hnae3_ring_chain_node *node;
4119 struct hclge_desc desc;
4120 struct hclge_ctrl_vector_chain_cmd *req
4121 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4122 enum hclge_cmd_status status;
4123 enum hclge_opcode_type op;
4124 u16 tqp_type_and_id;
4127 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4128 hclge_cmd_setup_basic_desc(&desc, op, false);
4129 req->int_vector_id = vector_id;
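/* Illustrative note: the ring chain is written out in chunks of
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries; whenever a descriptor fills up
 * it is sent and a fresh one is prepared for the remaining rings.
 */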
4132 for (node = ring_chain; node; node = node->next) {
4133 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4134 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4136 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4137 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4138 HCLGE_TQP_ID_S, node->tqp_index);
4139 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4141 hnae3_get_field(node->int_gl_idx,
4142 HNAE3_RING_GL_IDX_M,
4143 HNAE3_RING_GL_IDX_S));
4144 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4145 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4146 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4147 req->vfid = vport->vport_id;
4149 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4151 dev_err(&hdev->pdev->dev,
4152 "Map TQP fail, status is %d.\n",
4158 hclge_cmd_setup_basic_desc(&desc,
4161 req->int_vector_id = vector_id;
4166 req->int_cause_num = i;
4167 req->vfid = vport->vport_id;
4168 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4170 dev_err(&hdev->pdev->dev,
4171 "Map TQP fail, status is %d.\n", status);
4179 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4181 struct hnae3_ring_chain_node *ring_chain)
4183 struct hclge_vport *vport = hclge_get_vport(handle);
4184 struct hclge_dev *hdev = vport->back;
4187 vector_id = hclge_get_vector_index(hdev, vector);
4188 if (vector_id < 0) {
4189 dev_err(&hdev->pdev->dev,
4190 "Get vector index fail. vector_id =%d\n", vector_id);
4194 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4197 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4199 struct hnae3_ring_chain_node *ring_chain)
4201 struct hclge_vport *vport = hclge_get_vport(handle);
4202 struct hclge_dev *hdev = vport->back;
4205 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4208 vector_id = hclge_get_vector_index(hdev, vector);
4209 if (vector_id < 0) {
4210 dev_err(&handle->pdev->dev,
4211 "Get vector index fail. ret =%d\n", vector_id);
4215 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4217 dev_err(&handle->pdev->dev,
4218 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4225 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4226 struct hclge_promisc_param *param)
4228 struct hclge_promisc_cfg_cmd *req;
4229 struct hclge_desc desc;
4232 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4234 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4235 req->vf_id = param->vf_id;
4237 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4238 * pdev revision(0x20); newer revisions support them. Setting
4239 * these two fields does not return an error when the driver
4240 * sends the command to firmware on revision(0x20).
4242 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4243 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4245 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4247 dev_err(&hdev->pdev->dev,
4248 "Set promisc mode fail, status is %d.\n", ret);
4253 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4254 bool en_mc, bool en_bc, int vport_id)
4259 memset(param, 0, sizeof(struct hclge_promisc_param));
4261 param->enable = HCLGE_PROMISC_EN_UC;
4263 param->enable |= HCLGE_PROMISC_EN_MC;
4265 param->enable |= HCLGE_PROMISC_EN_BC;
4266 param->vf_id = vport_id;
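/* Usage sketch (illustrative only): enabling unicast + broadcast
 * promisc on vport 0 would look like
 *	struct hclge_promisc_param param;
 *
 *	hclge_promisc_param_init(&param, true, false, true, 0);
 *	ret = hclge_cmd_set_promisc_mode(hdev, &param);
 */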
4269 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4272 struct hclge_vport *vport = hclge_get_vport(handle);
4273 struct hclge_dev *hdev = vport->back;
4274 struct hclge_promisc_param param;
4275 bool en_bc_pmc = true;
4277 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4278 * is always bypassed. So broadcast promisc should be disabled until
4279 * the user enables promisc mode
4281 if (handle->pdev->revision == 0x20)
4282 en_bc_pmc = !!(handle->netdev_flags & HNAE3_BPE);
4284 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4286 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4289 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4291 struct hclge_get_fd_mode_cmd *req;
4292 struct hclge_desc desc;
4295 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4297 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4299 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4301 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4305 *fd_mode = req->mode;
4310 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4311 u32 *stage1_entry_num,
4312 u32 *stage2_entry_num,
4313 u16 *stage1_counter_num,
4314 u16 *stage2_counter_num)
4316 struct hclge_get_fd_allocation_cmd *req;
4317 struct hclge_desc desc;
4320 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4322 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4324 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4326 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4331 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4332 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4333 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4334 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4339 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4341 struct hclge_set_fd_key_config_cmd *req;
4342 struct hclge_fd_key_cfg *stage;
4343 struct hclge_desc desc;
4346 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4348 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4349 stage = &hdev->fd_cfg.key_cfg[stage_num];
4350 req->stage = stage_num;
4351 req->key_select = stage->key_sel;
4352 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4353 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4354 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4355 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4356 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4357 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4359 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4361 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4366 static int hclge_init_fd_config(struct hclge_dev *hdev)
4368 #define LOW_2_WORDS 0x03
4369 struct hclge_fd_key_cfg *key_cfg;
4372 if (!hnae3_dev_fd_supported(hdev))
4375 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4379 switch (hdev->fd_cfg.fd_mode) {
4380 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4381 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4383 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4384 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4387 dev_err(&hdev->pdev->dev,
4388 "Unsupported flow director mode %d\n",
4389 hdev->fd_cfg.fd_mode);
4393 hdev->fd_cfg.proto_support =
4394 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4395 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4396 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4397 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4398 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4399 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4400 key_cfg->outer_sipv6_word_en = 0;
4401 key_cfg->outer_dipv6_word_en = 0;
4403 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4404 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4405 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4406 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4408 /* If we use the max 400bit key, we can support tuples for ether type */
4409 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4410 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4411 key_cfg->tuple_active |=
4412 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4415 /* roce_type is used to filter roce frames
4416 * dst_vport is used to specify the rule
4418 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4420 ret = hclge_get_fd_allocation(hdev,
4421 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4422 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4423 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4424 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4428 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4431 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4432 int loc, u8 *key, bool is_add)
4434 struct hclge_fd_tcam_config_1_cmd *req1;
4435 struct hclge_fd_tcam_config_2_cmd *req2;
4436 struct hclge_fd_tcam_config_3_cmd *req3;
4437 struct hclge_desc desc[3];
4440 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4441 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4442 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4443 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4444 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4446 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4447 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4448 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4450 req1->stage = stage;
4451 req1->xy_sel = sel_x ? 1 : 0;
4452 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4453 req1->index = cpu_to_le32(loc);
4454 req1->entry_vld = sel_x ? is_add : 0;
	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}
4464 ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n", ret);
4473 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4474 struct hclge_fd_ad_data *action)
4476 struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;
4481 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4483 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4484 req->index = cpu_to_le32(loc);
4487 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4488 action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
4492 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4493 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4494 action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
4497 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4498 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4499 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4500 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->next_input_key);
4504 req->ad_data = cpu_to_le64(ad_data);
4505 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4507 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4512 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4513 struct hclge_fd_rule *rule)
4515 u16 tmp_x_s, tmp_y_s;
4516 u32 tmp_x_l, tmp_y_l;
4519 if (rule->unused_tuple & tuple_bit)
4522 switch (tuple_bit) {
4525 case BIT(INNER_DST_MAC):
4526 for (i = 0; i < 6; i++) {
4527 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4528 rule->tuples_mask.dst_mac[i]);
4529 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4530 rule->tuples_mask.dst_mac[i]);
4534 case BIT(INNER_SRC_MAC):
4535 for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
4543 case BIT(INNER_VLAN_TAG_FST):
4544 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4545 rule->tuples_mask.vlan_tag1);
4546 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4547 rule->tuples_mask.vlan_tag1);
4548 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4549 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4552 case BIT(INNER_ETH_TYPE):
4553 calc_x(tmp_x_s, rule->tuples.ether_proto,
4554 rule->tuples_mask.ether_proto);
4555 calc_y(tmp_y_s, rule->tuples.ether_proto,
4556 rule->tuples_mask.ether_proto);
4557 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4558 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4561 case BIT(INNER_IP_TOS):
4562 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4563 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4566 case BIT(INNER_IP_PROTO):
4567 calc_x(*key_x, rule->tuples.ip_proto,
4568 rule->tuples_mask.ip_proto);
4569 calc_y(*key_y, rule->tuples.ip_proto,
4570 rule->tuples_mask.ip_proto);
4573 case BIT(INNER_SRC_IP):
4574 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4575 rule->tuples_mask.src_ip[3]);
4576 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4577 rule->tuples_mask.src_ip[3]);
4578 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4579 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4582 case BIT(INNER_DST_IP):
4583 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4584 rule->tuples_mask.dst_ip[3]);
4585 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4586 rule->tuples_mask.dst_ip[3]);
4587 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4588 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4591 case BIT(INNER_SRC_PORT):
4592 calc_x(tmp_x_s, rule->tuples.src_port,
4593 rule->tuples_mask.src_port);
4594 calc_y(tmp_y_s, rule->tuples.src_port,
4595 rule->tuples_mask.src_port);
4596 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4597 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4600 case BIT(INNER_DST_PORT):
4601 calc_x(tmp_x_s, rule->tuples.dst_port,
4602 rule->tuples_mask.dst_port);
4603 calc_y(tmp_y_s, rule->tuples.dst_port,
4604 rule->tuples_mask.dst_port);
4605 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4606 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
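/* Pack a port number for the meta data key: either pf id plus vf id for a
 * host port, or the network port id, plus one bit telling the two apart.
 */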
4614 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4615 u8 vf_id, u8 network_port_id)
4617 u32 port_number = 0;
4619 if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
4624 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4626 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4627 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4628 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
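/* Build the meta data portion of the key: a packet type bit used for RoCE
 * filtering and the destination vport number. The result is shifted so that
 * it occupies the MSB end of the 32 bit meta data word.
 */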
4634 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4635 __le32 *key_x, __le32 *key_y,
4636 struct hclge_fd_rule *rule)
4638 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4639 u8 cur_pos = 0, tuple_size, shift_bits;
4642 for (i = 0; i < MAX_META_DATA; i++) {
4643 tuple_size = meta_data_key_info[i].key_length;
4644 tuple_bit = key_cfg->meta_data_active & BIT(i);
4646 switch (tuple_bit) {
4647 case BIT(ROCE_TYPE):
4648 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
4651 case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
4654 hnae3_set_field(meta_data,
4655 GENMASK(cur_pos + tuple_size, cur_pos),
4656 cur_pos, port_number);
			cur_pos += tuple_size;
			break;
4664 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4665 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4666 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4668 *key_x = cpu_to_le32(tmp_x << shift_bits);
4669 *key_y = cpu_to_le32(tmp_y << shift_bits);
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region of the key and the tuple
 * key in the LSB region; unused bits are filled with zero.
 */
4676 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4677 struct hclge_fd_rule *rule)
4679 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4680 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4681 u8 *cur_key_x, *cur_key_y;
4682 int i, ret, tuple_size;
4683 u8 meta_data_region;
4685 memset(key_x, 0, sizeof(key_x));
4686 memset(key_y, 0, sizeof(key_y));
4690 for (i = 0 ; i < MAX_TUPLE; i++) {
4694 tuple_size = tuple_key_info[i].key_length / 8;
4695 check_tuple = key_cfg->tuple_active & BIT(i);
		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
4700 cur_key_x += tuple_size;
4701 cur_key_y += tuple_size;
4705 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4706 MAX_META_DATA_LENGTH / 8;
4708 hclge_fd_convert_meta_data(key_cfg,
4709 (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);
	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}
	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);

	return ret;
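/* Translate the rule into its ad (action data) table entry: either drop the
 * packet or forward it to a specific queue. Counters and stage 2 are left
 * unused, and the rule id is written to the RX BD so the flow can be
 * identified later, e.g. by the aRFS expiry check.
 */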
4731 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4732 struct hclge_fd_rule *rule)
4734 struct hclge_fd_ad_data ad_data;
4736 ad_data.ad_id = rule->location;
4738 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4739 ad_data.drop_packet = true;
4740 ad_data.forward_to_direct_queue = false;
4741 ad_data.queue_id = 0;
4743 ad_data.drop_packet = false;
4744 ad_data.forward_to_direct_queue = true;
4745 ad_data.queue_id = rule->queue_id;
4748 ad_data.use_counter = false;
4749 ad_data.counter_id = 0;
4751 ad_data.use_next_stage = false;
4752 ad_data.next_input_key = 0;
4754 ad_data.write_rule_id_to_bd = true;
4755 ad_data.rule_id = rule->location;
4757 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
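/* Validate an ethtool flow spec: the location must fit into stage 1, the
 * flow type must be supported and user-defined bytes are rejected. Every
 * field left zero in the spec is recorded in @unused so the key can mark it
 * as "don't care".
 */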
4760 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4761 struct ethtool_rx_flow_spec *fs, u32 *unused)
4763 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4764 struct ethtool_usrip4_spec *usr_ip4_spec;
4765 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4766 struct ethtool_usrip6_spec *usr_ip6_spec;
4767 struct ethhdr *ether_spec;
4769 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4772 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4775 if ((fs->flow_type & FLOW_EXT) &&
4776 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4777 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4781 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4785 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4786 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4788 if (!tcp_ip4_spec->ip4src)
4789 *unused |= BIT(INNER_SRC_IP);
4791 if (!tcp_ip4_spec->ip4dst)
4792 *unused |= BIT(INNER_DST_IP);
4794 if (!tcp_ip4_spec->psrc)
4795 *unused |= BIT(INNER_SRC_PORT);
4797 if (!tcp_ip4_spec->pdst)
4798 *unused |= BIT(INNER_DST_PORT);
4800 if (!tcp_ip4_spec->tos)
4801 *unused |= BIT(INNER_IP_TOS);
4805 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4806 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4807 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4809 if (!usr_ip4_spec->ip4src)
4810 *unused |= BIT(INNER_SRC_IP);
4812 if (!usr_ip4_spec->ip4dst)
4813 *unused |= BIT(INNER_DST_IP);
4815 if (!usr_ip4_spec->tos)
4816 *unused |= BIT(INNER_IP_TOS);
4818 if (!usr_ip4_spec->proto)
4819 *unused |= BIT(INNER_IP_PROTO);
4821 if (usr_ip4_spec->l4_4_bytes)
4824 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4831 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);
4835 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4836 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4837 *unused |= BIT(INNER_SRC_IP);
4839 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4840 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4841 *unused |= BIT(INNER_DST_IP);
4843 if (!tcp_ip6_spec->psrc)
4844 *unused |= BIT(INNER_SRC_PORT);
4846 if (!tcp_ip6_spec->pdst)
4847 *unused |= BIT(INNER_DST_PORT);
4849 if (tcp_ip6_spec->tclass)
4853 case IPV6_USER_FLOW:
4854 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4855 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4856 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4857 BIT(INNER_DST_PORT);
4859 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4860 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4861 *unused |= BIT(INNER_SRC_IP);
4863 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4864 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4865 *unused |= BIT(INNER_DST_IP);
4867 if (!usr_ip6_spec->l4_proto)
4868 *unused |= BIT(INNER_IP_PROTO);
4870 if (usr_ip6_spec->tclass)
4873 if (usr_ip6_spec->l4_4_bytes)
4878 ether_spec = &fs->h_u.ether_spec;
4879 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4880 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4881 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4883 if (is_zero_ether_addr(ether_spec->h_source))
4884 *unused |= BIT(INNER_SRC_MAC);
4886 if (is_zero_ether_addr(ether_spec->h_dest))
4887 *unused |= BIT(INNER_DST_MAC);
4889 if (!ether_spec->h_proto)
4890 *unused |= BIT(INNER_ETH_TYPE);
4897 if ((fs->flow_type & FLOW_EXT)) {
4898 if (fs->h_ext.vlan_etype)
4900 if (!fs->h_ext.vlan_tci)
4901 *unused |= BIT(INNER_VLAN_TAG_FST);
4903 if (fs->m_ext.vlan_tci) {
4904 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4908 *unused |= BIT(INNER_VLAN_TAG_FST);
4911 if (fs->flow_type & FLOW_MAC_EXT) {
4912 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4915 if (is_zero_ether_addr(fs->h_ext.h_dest))
4916 *unused |= BIT(INNER_DST_MAC);
4918 *unused &= ~(BIT(INNER_DST_MAC));
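/* The rule list is kept sorted by location, so walk it up to the first
 * entry at or beyond @location and check for an exact match.
 */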
4924 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4926 struct hclge_fd_rule *rule = NULL;
4927 struct hlist_node *node2;
4929 spin_lock_bh(&hdev->fd_rule_lock);
4930 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4931 if (rule->location >= location)
4935 spin_unlock_bh(&hdev->fd_rule_lock);
4937 return rule && rule->location == location;
/* the caller must hold hdev->fd_rule_lock */
4941 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4942 struct hclge_fd_rule *new_rule,
4946 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4947 struct hlist_node *node2;
4949 if (is_add && !new_rule)
4952 hlist_for_each_entry_safe(rule, node2,
4953 &hdev->fd_rule_list, rule_node) {
4954 if (rule->location >= location)
4959 if (rule && rule->location == location) {
4960 hlist_del(&rule->rule_node);
4962 hdev->hclge_fd_rule_num--;
4965 if (!hdev->hclge_fd_rule_num)
4966 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4967 clear_bit(location, hdev->fd_bmap);
4971 } else if (!is_add) {
4972 dev_err(&hdev->pdev->dev,
4973 "delete fail, rule %d is inexistent\n",
4978 INIT_HLIST_NODE(&new_rule->rule_node);
4981 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4983 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4985 set_bit(location, hdev->fd_bmap);
4986 hdev->hclge_fd_rule_num++;
4987 hdev->fd_active_type = new_rule->rule_type;
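/* Copy the ethtool spec into the driver's tuple representation, converting
 * from big endian to host byte order. For TCP/UDP/SCTP flow types the l4
 * protocol is implied by the flow type and filled in accordingly.
 */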
4992 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4993 struct ethtool_rx_flow_spec *fs,
4994 struct hclge_fd_rule *rule)
4996 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4998 switch (flow_type) {
5002 rule->tuples.src_ip[3] =
5003 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5004 rule->tuples_mask.src_ip[3] =
5005 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5007 rule->tuples.dst_ip[3] =
5008 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5009 rule->tuples_mask.dst_ip[3] =
5010 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5012 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5013 rule->tuples_mask.src_port =
5014 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5016 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5017 rule->tuples_mask.dst_port =
5018 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5020 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5021 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5023 rule->tuples.ether_proto = ETH_P_IP;
5024 rule->tuples_mask.ether_proto = 0xFFFF;
5028 rule->tuples.src_ip[3] =
5029 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5030 rule->tuples_mask.src_ip[3] =
5031 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5033 rule->tuples.dst_ip[3] =
5034 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5035 rule->tuples_mask.dst_ip[3] =
5036 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5038 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5039 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5041 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5042 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5044 rule->tuples.ether_proto = ETH_P_IP;
5045 rule->tuples_mask.ether_proto = 0xFFFF;
5051 be32_to_cpu_array(rule->tuples.src_ip,
5052 fs->h_u.tcp_ip6_spec.ip6src, 4);
5053 be32_to_cpu_array(rule->tuples_mask.src_ip,
5054 fs->m_u.tcp_ip6_spec.ip6src, 4);
5056 be32_to_cpu_array(rule->tuples.dst_ip,
5057 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5058 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5059 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5061 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5062 rule->tuples_mask.src_port =
5063 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5065 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5066 rule->tuples_mask.dst_port =
5067 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5069 rule->tuples.ether_proto = ETH_P_IPV6;
5070 rule->tuples_mask.ether_proto = 0xFFFF;
5073 case IPV6_USER_FLOW:
5074 be32_to_cpu_array(rule->tuples.src_ip,
5075 fs->h_u.usr_ip6_spec.ip6src, 4);
5076 be32_to_cpu_array(rule->tuples_mask.src_ip,
5077 fs->m_u.usr_ip6_spec.ip6src, 4);
5079 be32_to_cpu_array(rule->tuples.dst_ip,
5080 fs->h_u.usr_ip6_spec.ip6dst, 4);
5081 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5082 fs->m_u.usr_ip6_spec.ip6dst, 4);
5084 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5085 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5087 rule->tuples.ether_proto = ETH_P_IPV6;
5088 rule->tuples_mask.ether_proto = 0xFFFF;
5092 ether_addr_copy(rule->tuples.src_mac,
5093 fs->h_u.ether_spec.h_source);
5094 ether_addr_copy(rule->tuples_mask.src_mac,
5095 fs->m_u.ether_spec.h_source);
5097 ether_addr_copy(rule->tuples.dst_mac,
5098 fs->h_u.ether_spec.h_dest);
5099 ether_addr_copy(rule->tuples_mask.dst_mac,
5100 fs->m_u.ether_spec.h_dest);
5102 rule->tuples.ether_proto =
5103 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5104 rule->tuples_mask.ether_proto =
5105 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5112 switch (flow_type) {
5115 rule->tuples.ip_proto = IPPROTO_SCTP;
5116 rule->tuples_mask.ip_proto = 0xFF;
5120 rule->tuples.ip_proto = IPPROTO_TCP;
5121 rule->tuples_mask.ip_proto = 0xFF;
5125 rule->tuples.ip_proto = IPPROTO_UDP;
5126 rule->tuples_mask.ip_proto = 0xFF;
5132 if ((fs->flow_type & FLOW_EXT)) {
5133 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5134 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5137 if (fs->flow_type & FLOW_MAC_EXT) {
5138 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5139 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
/* the caller must hold hdev->fd_rule_lock */
5146 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5147 struct hclge_fd_rule *rule)
5152 dev_err(&hdev->pdev->dev,
5153 "The flow director rule is NULL\n");
	/* adding to the sorted rule list never fails here, so there is no
	 * need to check the return value
	 */
5158 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5160 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5164 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5171 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5175 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5176 struct ethtool_rxnfc *cmd)
5178 struct hclge_vport *vport = hclge_get_vport(handle);
5179 struct hclge_dev *hdev = vport->back;
5180 u16 dst_vport_id = 0, q_index = 0;
5181 struct ethtool_rx_flow_spec *fs;
5182 struct hclge_fd_rule *rule;
5187 if (!hnae3_dev_fd_supported(hdev))
5191 dev_warn(&hdev->pdev->dev,
5192 "Please enable flow director first\n");
5196 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5198 ret = hclge_fd_check_spec(hdev, fs, &unused);
5200 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5204 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5205 action = HCLGE_FD_ACTION_DROP_PACKET;
5207 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5208 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5211 if (vf > hdev->num_req_vfs) {
5212 dev_err(&hdev->pdev->dev,
5213 "Error: vf id (%d) > max vf num (%d)\n",
5214 vf, hdev->num_req_vfs);
5218 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5219 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5222 dev_err(&hdev->pdev->dev,
5223 "Error: queue id (%d) > max tqp num (%d)\n",
5228 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5232 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5236 ret = hclge_fd_get_tuple(hdev, fs, rule);
5242 rule->flow_type = fs->flow_type;
5244 rule->location = fs->location;
5245 rule->unused_tuple = unused;
5246 rule->vf_id = dst_vport_id;
5247 rule->queue_id = q_index;
5248 rule->action = action;
5249 rule->rule_type = HCLGE_FD_EP_ACTIVE;
	/* when the user configures a rule via ethtool, clear all aRFS rules
	 * to avoid rule conflicts
	 */
5254 hclge_clear_arfs_rules(handle);
5256 spin_lock_bh(&hdev->fd_rule_lock);
5257 ret = hclge_fd_config_rule(hdev, rule);
5259 spin_unlock_bh(&hdev->fd_rule_lock);
5264 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5265 struct ethtool_rxnfc *cmd)
5267 struct hclge_vport *vport = hclge_get_vport(handle);
5268 struct hclge_dev *hdev = vport->back;
5269 struct ethtool_rx_flow_spec *fs;
5272 if (!hnae3_dev_fd_supported(hdev))
5275 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5277 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5280 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5281 dev_err(&hdev->pdev->dev,
5282 "Delete fail, rule %d is inexistent\n",
5287 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5288 fs->location, NULL, false);
5292 spin_lock_bh(&hdev->fd_rule_lock);
5293 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5295 spin_unlock_bh(&hdev->fd_rule_lock);
5300 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5303 struct hclge_vport *vport = hclge_get_vport(handle);
5304 struct hclge_dev *hdev = vport->back;
5305 struct hclge_fd_rule *rule;
5306 struct hlist_node *node;
5309 if (!hnae3_dev_fd_supported(hdev))
5312 spin_lock_bh(&hdev->fd_rule_lock);
5313 for_each_set_bit(location, hdev->fd_bmap,
5314 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5315 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5319 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5321 hlist_del(&rule->rule_node);
5324 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5325 hdev->hclge_fd_rule_num = 0;
5326 bitmap_zero(hdev->fd_bmap,
5327 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5330 spin_unlock_bh(&hdev->fd_rule_lock);
5333 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5335 struct hclge_vport *vport = hclge_get_vport(handle);
5336 struct hclge_dev *hdev = vport->back;
5337 struct hclge_fd_rule *rule;
5338 struct hlist_node *node;
	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
5345 if (!hnae3_dev_fd_supported(hdev))
	/* if fd is disabled, it should not be restored during reset */
5352 spin_lock_bh(&hdev->fd_rule_lock);
5353 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5354 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5356 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5359 dev_warn(&hdev->pdev->dev,
5360 "Restore rule %d failed, remove it\n",
5362 clear_bit(rule->location, hdev->fd_bmap);
5363 hlist_del(&rule->rule_node);
5365 hdev->hclge_fd_rule_num--;
5369 if (hdev->hclge_fd_rule_num)
5370 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5372 spin_unlock_bh(&hdev->fd_rule_lock);
5377 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5378 struct ethtool_rxnfc *cmd)
5380 struct hclge_vport *vport = hclge_get_vport(handle);
5381 struct hclge_dev *hdev = vport->back;
5383 if (!hnae3_dev_fd_supported(hdev))
5386 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5387 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5392 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5393 struct ethtool_rxnfc *cmd)
5395 struct hclge_vport *vport = hclge_get_vport(handle);
5396 struct hclge_fd_rule *rule = NULL;
5397 struct hclge_dev *hdev = vport->back;
5398 struct ethtool_rx_flow_spec *fs;
5399 struct hlist_node *node2;
5401 if (!hnae3_dev_fd_supported(hdev))
5404 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5406 spin_lock_bh(&hdev->fd_rule_lock);
5408 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5409 if (rule->location >= fs->location)
5413 if (!rule || fs->location != rule->location) {
5414 spin_unlock_bh(&hdev->fd_rule_lock);
5419 fs->flow_type = rule->flow_type;
5420 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5424 fs->h_u.tcp_ip4_spec.ip4src =
5425 cpu_to_be32(rule->tuples.src_ip[3]);
5426 fs->m_u.tcp_ip4_spec.ip4src =
5427 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5428 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5430 fs->h_u.tcp_ip4_spec.ip4dst =
5431 cpu_to_be32(rule->tuples.dst_ip[3]);
5432 fs->m_u.tcp_ip4_spec.ip4dst =
5433 rule->unused_tuple & BIT(INNER_DST_IP) ?
5434 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5436 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5437 fs->m_u.tcp_ip4_spec.psrc =
5438 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5439 0 : cpu_to_be16(rule->tuples_mask.src_port);
5441 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5442 fs->m_u.tcp_ip4_spec.pdst =
5443 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5444 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5446 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5447 fs->m_u.tcp_ip4_spec.tos =
5448 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5449 0 : rule->tuples_mask.ip_tos;
5453 fs->h_u.usr_ip4_spec.ip4src =
5454 cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.usr_ip4_spec.ip4src =
5456 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5457 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5459 fs->h_u.usr_ip4_spec.ip4dst =
5460 cpu_to_be32(rule->tuples.dst_ip[3]);
5461 fs->m_u.usr_ip4_spec.ip4dst =
5462 rule->unused_tuple & BIT(INNER_DST_IP) ?
5463 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5465 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5466 fs->m_u.usr_ip4_spec.tos =
5467 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5468 0 : rule->tuples_mask.ip_tos;
5470 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5471 fs->m_u.usr_ip4_spec.proto =
5472 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5473 0 : rule->tuples_mask.ip_proto;
5475 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5481 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5482 rule->tuples.src_ip, 4);
5483 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5484 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5486 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5487 rule->tuples_mask.src_ip, 4);
5489 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5490 rule->tuples.dst_ip, 4);
5491 if (rule->unused_tuple & BIT(INNER_DST_IP))
5492 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5494 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5495 rule->tuples_mask.dst_ip, 4);
5497 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5498 fs->m_u.tcp_ip6_spec.psrc =
5499 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5500 0 : cpu_to_be16(rule->tuples_mask.src_port);
5502 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5503 fs->m_u.tcp_ip6_spec.pdst =
5504 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5505 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5508 case IPV6_USER_FLOW:
5509 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5510 rule->tuples.src_ip, 4);
5511 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5512 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5514 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5515 rule->tuples_mask.src_ip, 4);
5517 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5518 rule->tuples.dst_ip, 4);
5519 if (rule->unused_tuple & BIT(INNER_DST_IP))
5520 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5522 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5523 rule->tuples_mask.dst_ip, 4);
5525 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5526 fs->m_u.usr_ip6_spec.l4_proto =
5527 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5528 0 : rule->tuples_mask.ip_proto;
5532 ether_addr_copy(fs->h_u.ether_spec.h_source,
5533 rule->tuples.src_mac);
5534 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5535 eth_zero_addr(fs->m_u.ether_spec.h_source);
5537 ether_addr_copy(fs->m_u.ether_spec.h_source,
5538 rule->tuples_mask.src_mac);
5540 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5541 rule->tuples.dst_mac);
5542 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5543 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5545 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5546 rule->tuples_mask.dst_mac);
5548 fs->h_u.ether_spec.h_proto =
5549 cpu_to_be16(rule->tuples.ether_proto);
5550 fs->m_u.ether_spec.h_proto =
5551 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5552 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5556 spin_unlock_bh(&hdev->fd_rule_lock);
5560 if (fs->flow_type & FLOW_EXT) {
5561 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5562 fs->m_ext.vlan_tci =
5563 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5564 cpu_to_be16(VLAN_VID_MASK) :
5565 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5568 if (fs->flow_type & FLOW_MAC_EXT) {
5569 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5570 if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
5577 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5578 fs->ring_cookie = RX_CLS_FLOW_DISC;
5582 fs->ring_cookie = rule->queue_id;
5583 vf_id = rule->vf_id;
5584 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5585 fs->ring_cookie |= vf_id;
5588 spin_unlock_bh(&hdev->fd_rule_lock);
5593 static int hclge_get_all_rules(struct hnae3_handle *handle,
5594 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5596 struct hclge_vport *vport = hclge_get_vport(handle);
5597 struct hclge_dev *hdev = vport->back;
5598 struct hclge_fd_rule *rule;
5599 struct hlist_node *node2;
5602 if (!hnae3_dev_fd_supported(hdev))
5605 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5607 spin_lock_bh(&hdev->fd_rule_lock);
5608 hlist_for_each_entry_safe(rule, node2,
5609 &hdev->fd_rule_list, rule_node) {
5610 if (cnt == cmd->rule_cnt) {
5611 spin_unlock_bh(&hdev->fd_rule_lock);
5615 rule_locs[cnt] = rule->location;
5619 spin_unlock_bh(&hdev->fd_rule_lock);
5621 cmd->rule_cnt = cnt;
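/* Extract the tuples aRFS matches on from the dissected flow keys: ether
 * proto, l4 proto, destination port and the IPv4/IPv6 addresses.
 */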
5626 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5627 struct hclge_fd_rule_tuples *tuples)
5629 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5630 tuples->ip_proto = fkeys->basic.ip_proto;
5631 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5633 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5634 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5635 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5637 memcpy(tuples->src_ip,
5638 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5639 sizeof(tuples->src_ip));
5640 memcpy(tuples->dst_ip,
5641 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5642 sizeof(tuples->dst_ip));
/* traverse all rules, checking whether an existing rule has the same tuples */
5647 static struct hclge_fd_rule *
5648 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5649 const struct hclge_fd_rule_tuples *tuples)
5651 struct hclge_fd_rule *rule = NULL;
5652 struct hlist_node *node;
5654 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5655 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5662 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5663 struct hclge_fd_rule *rule)
5665 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5666 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5667 BIT(INNER_SRC_PORT);
5670 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5671 if (tuples->ether_proto == ETH_P_IP) {
5672 if (tuples->ip_proto == IPPROTO_TCP)
5673 rule->flow_type = TCP_V4_FLOW;
5675 rule->flow_type = UDP_V4_FLOW;
5677 if (tuples->ip_proto == IPPROTO_TCP)
5678 rule->flow_type = TCP_V6_FLOW;
5680 rule->flow_type = UDP_V6_FLOW;
5682 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5683 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
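/* aRFS entry point: while ethtool rules are active, aRFS is disabled. For a
 * new flow a free slot is taken from fd_bmap and a rule is installed; for a
 * known flow only the destination queue is updated if it has changed. The
 * rule location doubles as the flow id reported back to the stack.
 */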
5686 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5687 u16 flow_id, struct flow_keys *fkeys)
5689 struct hclge_vport *vport = hclge_get_vport(handle);
5690 struct hclge_fd_rule_tuples new_tuples;
5691 struct hclge_dev *hdev = vport->back;
5692 struct hclge_fd_rule *rule;
5697 if (!hnae3_dev_fd_supported(hdev))
5700 memset(&new_tuples, 0, sizeof(new_tuples));
5701 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5703 spin_lock_bh(&hdev->fd_rule_lock);
	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
5708 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5709 spin_unlock_bh(&hdev->fd_rule_lock);
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
5719 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5721 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5722 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5723 spin_unlock_bh(&hdev->fd_rule_lock);
5728 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5730 spin_unlock_bh(&hdev->fd_rule_lock);
5735 set_bit(bit_id, hdev->fd_bmap);
5736 rule->location = bit_id;
5737 rule->flow_id = flow_id;
5738 rule->queue_id = queue_id;
5739 hclge_fd_build_arfs_rule(&new_tuples, rule);
5740 ret = hclge_fd_config_rule(hdev, rule);
5742 spin_unlock_bh(&hdev->fd_rule_lock);
5747 return rule->location;
5750 spin_unlock_bh(&hdev->fd_rule_lock);
5752 if (rule->queue_id == queue_id)
5753 return rule->location;
5755 tmp_queue_id = rule->queue_id;
5756 rule->queue_id = queue_id;
5757 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5759 rule->queue_id = tmp_queue_id;
5763 return rule->location;
5766 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5768 #ifdef CONFIG_RFS_ACCEL
5769 struct hnae3_handle *handle = &hdev->vport[0].nic;
5770 struct hclge_fd_rule *rule;
5771 struct hlist_node *node;
5772 HLIST_HEAD(del_list);
5774 spin_lock_bh(&hdev->fd_rule_lock);
5775 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5776 spin_unlock_bh(&hdev->fd_rule_lock);
5779 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5780 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5781 rule->flow_id, rule->location)) {
5782 hlist_del_init(&rule->rule_node);
5783 hlist_add_head(&rule->rule_node, &del_list);
5784 hdev->hclge_fd_rule_num--;
5785 clear_bit(rule->location, hdev->fd_bmap);
5788 spin_unlock_bh(&hdev->fd_rule_lock);
5790 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5791 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5792 rule->location, NULL, false);
5798 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5800 #ifdef CONFIG_RFS_ACCEL
5801 struct hclge_vport *vport = hclge_get_vport(handle);
5802 struct hclge_dev *hdev = vport->back;
5804 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5805 hclge_del_all_fd_entries(handle, true);
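/* A nonzero global or function reset register means the hardware reset has
 * not completed yet.
 */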
5809 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5811 struct hclge_vport *vport = hclge_get_vport(handle);
5812 struct hclge_dev *hdev = vport->back;
5814 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5815 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5818 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5820 struct hclge_vport *vport = hclge_get_vport(handle);
5821 struct hclge_dev *hdev = vport->back;
5823 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5826 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5828 struct hclge_vport *vport = hclge_get_vport(handle);
5829 struct hclge_dev *hdev = vport->back;
5831 return hdev->rst_stats.hw_reset_done_cnt;
5834 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5836 struct hclge_vport *vport = hclge_get_vport(handle);
5837 struct hclge_dev *hdev = vport->back;
5840 hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5843 hclge_del_all_fd_entries(handle, clear);
5845 hclge_restore_fd_entries(handle);
5848 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5850 struct hclge_desc desc;
5851 struct hclge_config_mac_mode_cmd *req =
5852 (struct hclge_config_mac_mode_cmd *)desc.data;
5856 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5857 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5858 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5859 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5860 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5861 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5862 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5863 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5864 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5865 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5866 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5867 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5868 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5869 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5870 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5871 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5873 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5875 dev_err(&hdev->pdev->dev,
5876 "mac enable fail, ret =%d.\n", ret);
5879 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5881 struct hclge_config_mac_mode_cmd *req;
5882 struct hclge_desc desc;
5886 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5887 /* 1 Read out the MAC mode config at first */
5888 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5889 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5891 dev_err(&hdev->pdev->dev,
5892 "mac loopback get fail, ret =%d.\n", ret);
5896 /* 2 Then setup the loopback flag */
5897 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5898 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5899 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5900 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5902 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
	/* 3 Config mac work mode with loopback flag
	 * and its original configuration parameters
	 */
5907 hclge_cmd_reuse_desc(&desc, false);
5908 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5910 dev_err(&hdev->pdev->dev,
5911 "mac loopback set fail, ret =%d.\n", ret);
5915 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5916 enum hnae3_loop loop_mode)
5918 #define HCLGE_SERDES_RETRY_MS 10
5919 #define HCLGE_SERDES_RETRY_NUM 100
5921 #define HCLGE_MAC_LINK_STATUS_MS 10
5922 #define HCLGE_MAC_LINK_STATUS_NUM 100
5923 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5924 #define HCLGE_MAC_LINK_STATUS_UP 1
5926 struct hclge_serdes_lb_cmd *req;
5927 struct hclge_desc desc;
5928 int mac_link_ret = 0;
5932 req = (struct hclge_serdes_lb_cmd *)desc.data;
5933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5935 switch (loop_mode) {
5936 case HNAE3_LOOP_SERIAL_SERDES:
5937 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5939 case HNAE3_LOOP_PARALLEL_SERDES:
5940 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5943 dev_err(&hdev->pdev->dev,
5944 "unsupported serdes loopback mode %d\n", loop_mode);
5949 req->enable = loop_mode_b;
5950 req->mask = loop_mode_b;
5951 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5953 req->mask = loop_mode_b;
5954 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5957 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5959 dev_err(&hdev->pdev->dev,
5960 "serdes loopback set fail, ret = %d\n", ret);
5965 msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
5968 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5970 dev_err(&hdev->pdev->dev,
5971 "serdes loopback get, ret = %d\n", ret);
5974 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5975 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5977 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5978 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5980 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5981 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5985 hclge_cfg_mac_mode(hdev, en);
	/* serdes internal loopback, independent of the network cable */
5990 msleep(HCLGE_MAC_LINK_STATUS_MS);
5991 ret = hclge_get_mac_link_status(hdev);
5992 if (ret == mac_link_ret)
5994 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5996 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6001 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6002 int stream_id, bool enable)
6004 struct hclge_desc desc;
6005 struct hclge_cfg_com_tqp_queue_cmd *req =
6006 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6009 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6010 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6011 req->stream_id = cpu_to_le16(stream_id);
6012 req->enable |= enable << HCLGE_TQP_ENABLE_B;
6014 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6016 dev_err(&hdev->pdev->dev,
6017 "Tqp enable fail, status =%d.\n", ret);
6021 static int hclge_set_loopback(struct hnae3_handle *handle,
6022 enum hnae3_loop loop_mode, bool en)
6024 struct hclge_vport *vport = hclge_get_vport(handle);
6025 struct hnae3_knic_private_info *kinfo;
6026 struct hclge_dev *hdev = vport->back;
6029 switch (loop_mode) {
6030 case HNAE3_LOOP_APP:
6031 ret = hclge_set_app_loopback(hdev, en);
6033 case HNAE3_LOOP_SERIAL_SERDES:
6034 case HNAE3_LOOP_PARALLEL_SERDES:
6035 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6039 dev_err(&hdev->pdev->dev,
6040 "loop_mode %d is not supported\n", loop_mode);
6047 kinfo = &vport->nic.kinfo;
6048 for (i = 0; i < kinfo->num_tqps; i++) {
6049 ret = hclge_tqp_enable(hdev, i, 0, en);
6057 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6059 struct hclge_vport *vport = hclge_get_vport(handle);
6060 struct hnae3_knic_private_info *kinfo;
6061 struct hnae3_queue *queue;
6062 struct hclge_tqp *tqp;
6065 kinfo = &vport->nic.kinfo;
6066 for (i = 0; i < kinfo->num_tqps; i++) {
6067 queue = handle->kinfo.tqp[i];
6068 tqp = container_of(queue, struct hclge_tqp, q);
6069 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6073 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6075 struct hclge_vport *vport = hclge_get_vport(handle);
6076 struct hclge_dev *hdev = vport->back;
6079 mod_timer(&hdev->service_timer, jiffies + HZ);
6081 del_timer_sync(&hdev->service_timer);
6082 cancel_work_sync(&hdev->service_task);
6083 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6087 static int hclge_ae_start(struct hnae3_handle *handle)
6089 struct hclge_vport *vport = hclge_get_vport(handle);
6090 struct hclge_dev *hdev = vport->back;
6093 hclge_cfg_mac_mode(hdev, true);
6094 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6095 hdev->hw.mac.link = 0;
6097 /* reset tqp stats */
6098 hclge_reset_tqp_stats(handle);
6100 hclge_mac_start_phy(hdev);
6105 static void hclge_ae_stop(struct hnae3_handle *handle)
6107 struct hclge_vport *vport = hclge_get_vport(handle);
6108 struct hclge_dev *hdev = vport->back;
6111 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6113 hclge_clear_arfs_rules(handle);
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so only the phy needs to be stopped here.
	 */
6118 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6119 hdev->reset_type != HNAE3_FUNC_RESET) {
6120 hclge_mac_stop_phy(hdev);
6124 for (i = 0; i < handle->kinfo.num_tqps; i++)
6125 hclge_reset_tqp(handle, i);
6128 hclge_cfg_mac_mode(hdev, false);
6130 hclge_mac_stop_phy(hdev);
6132 /* reset tqp stats */
6133 hclge_reset_tqp_stats(handle);
6134 hclge_update_link_status(hdev);
6137 int hclge_vport_start(struct hclge_vport *vport)
6139 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6140 vport->last_active_jiffies = jiffies;
6144 void hclge_vport_stop(struct hclge_vport *vport)
6146 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6149 static int hclge_client_start(struct hnae3_handle *handle)
6151 struct hclge_vport *vport = hclge_get_vport(handle);
6153 return hclge_vport_start(vport);
6156 static void hclge_client_stop(struct hnae3_handle *handle)
6158 struct hclge_vport *vport = hclge_get_vport(handle);
6160 hclge_vport_stop(vport);
6163 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6164 u16 cmdq_resp, u8 resp_code,
6165 enum hclge_mac_vlan_tbl_opcode op)
6167 struct hclge_dev *hdev = vport->back;
6168 int return_status = -EIO;
6171 dev_err(&hdev->pdev->dev,
6172 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6177 if (op == HCLGE_MAC_VLAN_ADD) {
6178 if ((!resp_code) || (resp_code == 1)) {
6180 } else if (resp_code == 2) {
6181 return_status = -ENOSPC;
6182 dev_err(&hdev->pdev->dev,
6183 "add mac addr failed for uc_overflow.\n");
6184 } else if (resp_code == 3) {
6185 return_status = -ENOSPC;
6186 dev_err(&hdev->pdev->dev,
6187 "add mac addr failed for mc_overflow.\n");
6189 dev_err(&hdev->pdev->dev,
6190 "add mac addr failed for undefined, code=%d.\n",
6193 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6196 } else if (resp_code == 1) {
6197 return_status = -ENOENT;
6198 dev_dbg(&hdev->pdev->dev,
6199 "remove mac addr failed for miss.\n");
6201 dev_err(&hdev->pdev->dev,
6202 "remove mac addr failed for undefined, code=%d.\n",
6205 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6208 } else if (resp_code == 1) {
6209 return_status = -ENOENT;
6210 dev_dbg(&hdev->pdev->dev,
6211 "lookup mac addr failed for miss.\n");
6213 dev_err(&hdev->pdev->dev,
6214 "lookup mac addr failed for undefined, code=%d.\n",
6218 return_status = -EINVAL;
6219 dev_err(&hdev->pdev->dev,
6220 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6224 return return_status;
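/* The per-function bitmap of a multicast entry spans descriptors 1 and 2:
 * function ids 0-191 live in desc[1] and ids 192-255 in desc[2], one bit
 * per function.
 */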
6227 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6232 if (vfid > 255 || vfid < 0)
6235 if (vfid >= 0 && vfid <= 191) {
6236 word_num = vfid / 32;
6237 bit_num = vfid % 32;
6239 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6241 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6243 word_num = (vfid - 192) / 32;
6244 bit_num = vfid % 32;
6246 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6248 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6254 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6256 #define HCLGE_DESC_NUMBER 3
6257 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6260 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6261 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
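/* Pack the 6 byte MAC address into the table entry: bytes 0-3 form the
 * 32 bit high word and bytes 4-5 the 16 bit low word. Multicast entries
 * additionally get the mc entry type bits set.
 */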
6268 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6269 const u8 *addr, bool is_mc)
6271 const unsigned char *mac_addr = addr;
6272 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6273 (mac_addr[0]) | (mac_addr[1] << 8);
6274 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6276 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6278 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6279 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6282 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6283 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6286 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6287 struct hclge_mac_vlan_tbl_entry_cmd *req)
6289 struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;
6295 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6297 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6299 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6301 dev_err(&hdev->pdev->dev,
6302 "del mac addr failed for cmd_send, ret =%d.\n",
6306 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6307 retval = le16_to_cpu(desc.retval);
6309 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6310 HCLGE_MAC_VLAN_REMOVE);
6313 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6314 struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;
6323 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6326 memcpy(desc[0].data,
6328 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6329 hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
6332 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6333 hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
6338 memcpy(desc[0].data,
6340 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
6344 dev_err(&hdev->pdev->dev,
6345 "lookup mac addr failed for cmd_send, ret =%d.\n",
6349 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6350 retval = le16_to_cpu(desc[0].retval);
6352 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6353 HCLGE_MAC_VLAN_LKUP);
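/* Add a mac_vlan table entry. Unicast entries use a single descriptor;
 * multicast entries reuse the three descriptors filled by the preceding
 * lookup so that the existing per-function bitmap is preserved.
 */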
6356 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6357 struct hclge_mac_vlan_tbl_entry_cmd *req,
6358 struct hclge_desc *mc_desc)
6360 struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;
6369 hclge_cmd_setup_basic_desc(&desc,
6370 HCLGE_OPC_MAC_VLAN_ADD,
6372 memcpy(desc.data, req,
6373 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6374 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6375 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6376 retval = le16_to_cpu(desc.retval);
		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
6382 hclge_cmd_reuse_desc(&mc_desc[0], false);
6383 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6384 hclge_cmd_reuse_desc(&mc_desc[1], false);
6385 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6386 hclge_cmd_reuse_desc(&mc_desc[2], false);
6387 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6388 memcpy(mc_desc[0].data, req,
6389 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6390 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6391 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6392 retval = le16_to_cpu(mc_desc[0].retval);
		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}
6400 dev_err(&hdev->pdev->dev,
6401 "add mac addr failed for cmd_send, ret =%d.\n",
6409 static int hclge_init_umv_space(struct hclge_dev *hdev)
6411 u16 allocated_size = 0;
6414 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6419 if (allocated_size < hdev->wanted_umv_size)
6420 dev_warn(&hdev->pdev->dev,
6421 "Alloc umv space failed, want %d, get %d\n",
6422 hdev->wanted_umv_size, allocated_size);
6424 mutex_init(&hdev->umv_mutex);
6425 hdev->max_umv_size = allocated_size;
6426 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6427 hdev->share_umv_size = hdev->priv_umv_size +
6428 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6433 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6437 if (hdev->max_umv_size > 0) {
6438 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6442 hdev->max_umv_size = 0;
6444 mutex_destroy(&hdev->umv_mutex);
6449 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6450 u16 *allocated_size, bool is_alloc)
6452 struct hclge_umv_spc_alc_cmd *req;
6453 struct hclge_desc desc;
6456 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6457 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6458 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6459 req->space_size = cpu_to_le32(space_size);
6461 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6463 dev_err(&hdev->pdev->dev,
6464 "%s umv space failed for cmd_send, ret =%d\n",
6465 is_alloc ? "allocate" : "free", ret);
6469 if (is_alloc && allocated_size)
6470 *allocated_size = le32_to_cpu(desc.data[1]);
6475 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6477 struct hclge_vport *vport;
6480 for (i = 0; i < hdev->num_alloc_vport; i++) {
6481 vport = &hdev->vport[i];
6482 vport->used_umv_num = 0;
6485 mutex_lock(&hdev->umv_mutex);
6486 hdev->share_umv_size = hdev->priv_umv_size +
6487 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6488 mutex_unlock(&hdev->umv_mutex);
6491 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6493 struct hclge_dev *hdev = vport->back;
6496 mutex_lock(&hdev->umv_mutex);
6497 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6498 hdev->share_umv_size == 0);
6499 mutex_unlock(&hdev->umv_mutex);
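/* Account one used or freed unicast entry: a function consumes its private
 * quota first and only then eats into the shared pool; freeing works in the
 * reverse order.
 */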
6504 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6506 struct hclge_dev *hdev = vport->back;
6508 mutex_lock(&hdev->umv_mutex);
6510 if (vport->used_umv_num > hdev->priv_umv_size)
6511 hdev->share_umv_size++;
6513 if (vport->used_umv_num > 0)
6514 vport->used_umv_num--;
6516 if (vport->used_umv_num >= hdev->priv_umv_size &&
6517 hdev->share_umv_size > 0)
6518 hdev->share_umv_size--;
6519 vport->used_umv_num++;
6521 mutex_unlock(&hdev->umv_mutex);
6524 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6525 const unsigned char *addr)
6527 struct hclge_vport *vport = hclge_get_vport(handle);
6529 return hclge_add_uc_addr_common(vport, addr);
6532 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6533 const unsigned char *addr)
6535 struct hclge_dev *hdev = vport->back;
6536 struct hclge_mac_vlan_tbl_entry_cmd req;
6537 struct hclge_desc desc;
6538 u16 egress_port = 0;
6541 /* mac addr check */
6542 if (is_zero_ether_addr(addr) ||
6543 is_broadcast_ether_addr(addr) ||
6544 is_multicast_ether_addr(addr)) {
6545 dev_err(&hdev->pdev->dev,
6546 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr,
			is_zero_ether_addr(addr),
6549 is_broadcast_ether_addr(addr),
6550 is_multicast_ether_addr(addr));
6554 memset(&req, 0, sizeof(req));
6556 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6557 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6559 req.egress_port = cpu_to_le16(egress_port);
6561 hclge_prepare_mac_addr(&req, addr, false);
	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
6567 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6568 if (ret == -ENOENT) {
6569 if (!hclge_is_umv_space_full(vport)) {
6570 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6572 hclge_update_umv_space(vport, false);
6576 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6577 hdev->priv_umv_size);
6582 /* check if we just hit the duplicate */
6584 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6585 vport->vport_id, addr);
6589 dev_err(&hdev->pdev->dev,
6590 "PF failed to add unicast entry(%pM) in the MAC table\n",
6596 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6597 const unsigned char *addr)
6599 struct hclge_vport *vport = hclge_get_vport(handle);
6601 return hclge_rm_uc_addr_common(vport, addr);
6604 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6605 const unsigned char *addr)
6607 struct hclge_dev *hdev = vport->back;
6608 struct hclge_mac_vlan_tbl_entry_cmd req;
6611 /* mac addr check */
6612 if (is_zero_ether_addr(addr) ||
6613 is_broadcast_ether_addr(addr) ||
6614 is_multicast_ether_addr(addr)) {
6615 dev_dbg(&hdev->pdev->dev,
6616 "Remove mac err! invalid mac:%pM.\n",
6621 memset(&req, 0, sizeof(req));
6622 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6623 hclge_prepare_mac_addr(&req, addr, false);
6624 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6626 hclge_update_umv_space(vport, true);
6631 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6632 const unsigned char *addr)
6634 struct hclge_vport *vport = hclge_get_vport(handle);
6636 return hclge_add_mc_addr_common(vport, addr);
6639 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6640 const unsigned char *addr)
6642 struct hclge_dev *hdev = vport->back;
6643 struct hclge_mac_vlan_tbl_entry_cmd req;
6644 struct hclge_desc desc[3];
6647 /* mac addr check */
6648 if (!is_multicast_ether_addr(addr)) {
6649 dev_err(&hdev->pdev->dev,
6650 "Add mc mac err! invalid mac:%pM.\n",
6654 memset(&req, 0, sizeof(req));
6655 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6656 hclge_prepare_mac_addr(&req, addr, true);
6657 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
		/* This mac addr exists, update the VFID for it */
6660 hclge_update_desc_vfid(desc, vport->vport_id, false);
6661 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
		/* This mac addr does not exist, add a new entry for it */
6664 memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
6667 hclge_update_desc_vfid(desc, vport->vport_id, false);
6668 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6671 if (status == -ENOSPC)
6672 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6677 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6678 const unsigned char *addr)
6680 struct hclge_vport *vport = hclge_get_vport(handle);
6682 return hclge_rm_mc_addr_common(vport, addr);
6685 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6686 const unsigned char *addr)
6688 struct hclge_dev *hdev = vport->back;
6689 struct hclge_mac_vlan_tbl_entry_cmd req;
6690 enum hclge_cmd_status status;
6691 struct hclge_desc desc[3];
6693 /* mac addr check */
6694 if (!is_multicast_ether_addr(addr)) {
6695 dev_dbg(&hdev->pdev->dev,
6696 "Remove mc mac err! invalid mac:%pM.\n",
6701 memset(&req, 0, sizeof(req));
6702 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6703 hclge_prepare_mac_addr(&req, addr, true);
6704 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6706 /* This mac addr exist, remove this handle's VFID for it */
6707 hclge_update_desc_vfid(desc, vport->vport_id, true);
6709 if (hclge_is_all_function_id_zero(desc))
6710 /* All the vfid is zero, so need to delete this entry */
6711 status = hclge_remove_mac_vlan_tbl(vport, &req);
6713 /* Not all the vfid is zero, update the vfid */
6714 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6717 /* Maybe this mac address is in mta table, but it cannot be
6718 * deleted here because an entry of mta represents an address
6719 * range rather than a specific address. the delete action to
6720 * all entries will take effect in update_mta_status called by
6721 * hns3_nic_set_rx_mode.
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}

void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}

void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
		return -EINVAL;
	}

	if ((!is_first || is_kdump_kernel()) &&
	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}

static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
			dev_warn(&hdev->pdev->dev,
				 "vlan %d filter is not in vf vlan table\n",
				 vlan);
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}

static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}

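/* Note: hdev->vlan_table[vlan_id] acts as a per-VLAN reference bitmap of
 * vports. The port-level filter entry is only touched when the first
 * vport joins a VLAN (vport_num == 1 after adding) or the last vport
 * leaves it (vport_num == 0 after killing); intermediate joins and
 * leaves update the VF filter alone.
 */
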
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them,
	 * and these two fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

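/* A rough summary of the tag1/tag2 split above (descriptive only): with
 * port based VLAN enabled, the port VLAN is inserted as tag1 on transmit
 * (insert_tag1_en/default_tag1) and tagged user traffic is refused on
 * tag1 (accept_tag1 = false); on receive, one tag level is always
 * stripped while the other follows the user's rx_vlan_offload_en
 * setting. With port based VLAN disabled, only rx_vlan_offload_en drives
 * stripping.
 */
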
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}

static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}

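/* Decision table for hclge_get_port_base_vlan_state() (derived from the
 * code above):
 *
 *   current state | requested vlan          | result
 *   --------------+-------------------------+----------
 *   DISABLE       | 0                       | NOCHANGE
 *   DISABLE       | non-zero                | ENABLE
 *   ENABLE        | 0                       | DISABLE
 *   ENABLE        | same as configured tag  | NOCHANGE
 *   ENABLE        | different non-zero tag  | MODIFY
 */
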
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							(u8)vfid, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case, we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing VLAN;
	 * we just update the vport VLAN list. The VLAN id in the VLAN list
	 * won't be written to the VLAN filter table until port based VLAN
	 * is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	}

	return ret;
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

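/* Worked example for the frame size math above (illustrative only): for
 * new_mtu = 1500, max_frm_size = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) +
 * 2 * VLAN_HLEN(2 * 4) = 1526 bytes, i.e. MTU plus the L2 header, FCS
 * and room for two VLAN tags. The result is then clamped up to
 * HCLGE_MAC_DEFAULT_FRAME before being programmed.
 */
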
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}

int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	return hclge_restart_autoneg(handle);
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);
			set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);

			if (netif_msg_drv(&hdev->vport->nic))
				hclge_info_show(hdev);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_config_mac_tnl_int(hdev, false);
	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

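/* Worked example for the RSS TC mode math above (illustrative only):
 * tc_size holds log2 of the per-TC RSS span, so for kinfo->rss_size = 12,
 * roundup_pow_of_two(12) = 16 and ilog2(16) = 4 is programmed, while
 * tc_offset spaces the enabled TCs at multiples of the actual rss_size.
 */
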
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

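/* Packing example for the 32 bit register query above (illustrative
 * only): the first descriptor loses two data words to metadata, hence
 * DIV_ROUND_UP(regs_num + 2, 8). For regs_num = 30 that gives
 * cmd_num = 4; the first descriptor then contributes 6 values and each
 * continuation descriptor, reinterpreted whole as data, contributes 8
 * (6 + 3 * 8 = 30).
 */
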
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

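/* Layout note for the dump length above: each register block is emitted
 * in REG_LEN_PER_LINE (16 byte) lines with one extra line of separator
 * padding (the "+ 1"); the ring block repeats per TQP and the interrupt
 * block per in-use MSI-X vector, followed by the raw 32 bit and 64 bit
 * firmware register arrays.
 */
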
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

9050 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9052 struct hclge_set_led_state_cmd *req;
9053 struct hclge_desc desc;
9056 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9058 req = (struct hclge_set_led_state_cmd *)desc.data;
9059 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9060 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9062 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9064 dev_err(&hdev->pdev->dev,
9065 "Send set led state cmd error, ret =%d\n", ret);
9070 enum hclge_led_status {
9073 HCLGE_LED_NO_CHANGE = 0xFF,
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

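/* Copy the MAC's cached supported/advertising link-mode bitmaps into
 * the caller-provided ethtool-sized bitmaps.
 */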
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

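/* Operations table exported to the hnae3 framework; client drivers
 * (e.g. the hns3 enet driver) reach the PF hardware through these
 * callbacks.
 */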
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

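/* Module init only registers the algorithm; the hnae3 framework
 * binds it to compatible devices via ae_algo_pci_tbl.
 */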
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);