// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
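/*
 * Illustrative use of the two helpers above (a sketch, not code from this
 * driver): reading one 64-bit MAC counter by its field offset, e.g.
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * This is exactly how hclge_comm_get_stats() walks g_mac_stats_string.
 */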
#define HCLGE_BUF_SIZE_UNIT	256
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
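/* The table above supplies both the ethtool stat names and the struct
 * offsets consumed by hclge_comm_get_strings()/hclge_comm_get_stats()
 * below, so "ethtool -S" reports the MAC counters in this exact order.
 */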
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
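/* The single entry above matches the LLDP multicast MAC address
 * 01:80:c2:00:00:0e (high 32 bits 0x0180C200, low 16 bits 0x000E, both
 * stored big-endian via htonl()/htons()) together with the LLDP
 * ethertype.
 */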
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
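/* Note: this 40-byte value appears to be the de facto default RSS
 * (Toeplitz) hash key that many NIC drivers share; it is not specific
 * to this hardware.
 */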
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
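/* Worked example for the formula above, assuming the layout it implies
 * (3 counters in the first descriptor, 4 in each later one): a firmware
 * reporting reg_num = 83 yields 1 + (80 >> 2) + 0 = 21 descriptors,
 * which matches HCLGE_MAC_CMD_NUM used by the defective path.
 */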
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp exports one TX and one RX packet counter */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes (GE/XGE/LGE/CGE) support it
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
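/* The loop above retries the query up to five times with a 1-2 ms sleep
 * in between, giving a PF reset that is still in progress a short grace
 * period before the final status is parsed.
 */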
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
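/* The case labels above are the firmware's speed encodings; they mirror
 * the values programmed into HCLGE_CFG_SPEED_M by
 * hclge_cfg_mac_speed_dup_hw() further below.
 */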
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all GE speeds */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
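/* MAC address layout in the parameters parsed above: the low 32 bits sit
 * in param[2] and the high 16 bits in param[3], so the address is rebuilt
 * as (hi << 32) | lo; the two-step "(hi << 31) << 1" is equivalent to a
 * single 32-bit shift of the u64 value.
 */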
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length field is in units of 4 bytes when sent to hw */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous tc maps are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
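/* Note: the MSS_MIN mask/shift pair is reused for the max value above.
 * Since each limit is written into its own 16-bit word, this appears to
 * work because the min and max fields occupy the same bit range within
 * their respective words.
 */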
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
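/* TQP distribution example for the code above: with 40 TQPs and 3 vports,
 * tqp_per_vport is 40 / 3 = 13 and the main (PF) vport gets 13 + 40 % 3 =
 * 14, absorbing the remainder.
 */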
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
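/* hclge_is_rx_buf_ok: best-effort summary of the logic below - check that
 * the rx buffer left over after private per-TC allocation can hold the
 * required shared buffer, and if so size the shared buffer and derive its
 * high/low waterlines and per-TC thresholds.
 */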
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
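/* Only 10M and 100M links may run half duplex; the helper below forces
 * every faster speed to full duplex.
 */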
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2235 duplex = hclge_check_speed_dup(duplex, speed);
2236 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2239 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2243 hdev->hw.mac.speed = speed;
2244 hdev->hw.mac.duplex = duplex;
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2252 struct hclge_vport *vport = hclge_get_vport(handle);
2253 struct hclge_dev *hdev = vport->back;
2255 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2260 struct hclge_config_auto_neg_cmd *req;
2261 struct hclge_desc desc;
2265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2267 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2281 struct hclge_vport *vport = hclge_get_vport(handle);
2282 struct hclge_dev *hdev = vport->back;
2284 if (!hdev->hw.mac.support_autoneg) {
2286 dev_err(&hdev->pdev->dev,
2287 "autoneg is not supported by current port\n");
2294 return hclge_set_autoneg_en(hdev, enable);
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2299 struct hclge_vport *vport = hclge_get_vport(handle);
2300 struct hclge_dev *hdev = vport->back;
2301 struct phy_device *phydev = hdev->hw.mac.phydev;
2304 return phydev->autoneg;
2306 return hdev->hw.mac.autoneg;
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2311 struct hclge_vport *vport = hclge_get_vport(handle);
2312 struct hclge_dev *hdev = vport->back;
2315 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2320 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2325 struct hclge_config_fec_cmd *req;
2326 struct hclge_desc desc;
2329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2331 req = (struct hclge_config_fec_cmd *)desc.data;
2332 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334 if (fec_mode & BIT(HNAE3_FEC_RS))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337 if (fec_mode & BIT(HNAE3_FEC_BASER))
2338 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2350 struct hclge_vport *vport = hclge_get_vport(handle);
2351 struct hclge_dev *hdev = vport->back;
2352 struct hclge_mac *mac = &hdev->hw.mac;
2355 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2360 ret = hclge_set_fec_hw(hdev, fec_mode);
2364 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2371 struct hclge_vport *vport = hclge_get_vport(handle);
2372 struct hclge_dev *hdev = vport->back;
2373 struct hclge_mac *mac = &hdev->hw.mac;
2376 *fec_ability = mac->fec_ability;
2378 *fec_mode = mac->fec_mode;
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2383 struct hclge_mac *mac = &hdev->hw.mac;
2386 hdev->support_sfp_query = true;
2387 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389 hdev->hw.mac.duplex);
2391 dev_err(&hdev->pdev->dev,
2392 "Config mac speed dup fail ret=%d\n", ret);
2398 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2401 dev_err(&hdev->pdev->dev,
2402 "Fec mode init fail, ret = %d\n", ret);
2407 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2409 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2413 ret = hclge_buffer_alloc(hdev);
2415 dev_err(&hdev->pdev->dev,
2416 "allocate buffer fail, ret=%d\n", ret);
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425 schedule_work(&hdev->mbx_service_task);
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2430 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431 schedule_work(&hdev->rst_service_task);
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2436 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439 (void)schedule_work(&hdev->service_task);
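/* Link state reported to the stack is the logical AND of the MAC link
 * queried from firmware and, when an external PHY is attached, the PHY
 * link state (see hclge_get_mac_phy_link() below).
 */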
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			hclge_config_mac_tnl_int(hdev, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}

static void hclge_update_port_capability(struct hclge_mac *mac)
{
	/* update fec ability by speed */
	hclge_convert_setting_fec(mac);

	/* firmware cannot identify the backplane type; the media type
	 * read from the configuration helps to deal with it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		linkmode_zero(mac->advertising);
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}

static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;

	resp->query_type = QUERY_ACTIVE_SPEED;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}

	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */
	if (resp->speed_ability) {
		mac->module_type = le32_to_cpu(resp->module_type);
		mac->speed_ability = le32_to_cpu(resp->speed_ability);
		mac->autoneg = resp->autoneg;
		mac->support_autoneg = resp->autoneg_ability;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}

static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed = HCLGE_MAC_SPEED_UNKNOWN;
	int ret;

	/* get the port info from SFP cmd if not copper port */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return 0;

	/* if IMP does not support get SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->pdev->revision >= 0x21)
		ret = hclge_get_sfp_info(hdev, mac);
	else
		ret = hclge_get_sfp_speed(hdev, &speed);

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->pdev->revision >= 0x21) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(mac);
			return 0;
		} else {
			return hclge_cfg_mac_speed_dup(hdev, mac->speed,
						       HCLGE_MAC_FULL);
		}
	} else if (speed == HCLGE_MAC_SPEED_UNKNOWN) {
		return 0; /* do nothing if no SFP */
	}

	/* must config full duplex for SFP */
	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hdev->fd_arfs_expire_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

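/* Vector 0 events are decoded in strict priority order: IMP, global and
 * core reset sources first, then MSI-X (hardware error) events, and the
 * mailbox (CMDQ RX) source last, so a pending reset always wins over a
 * pending mailbox message.
 */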
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		hdev->rst_stats.core_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
			msix_src_reg);
		return HCLGE_VECTOR0_EVENT_ERR;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
		cmdq_src_reg, msix_src_reg);
	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.*/
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
		 *    will fetch the correct type of reset. This would be done
		 *    by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset; for a
	 * reset the vector stays masked until the reset cause is cleared in
	 * hclge_clear_reset_cause().
	 */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset not finished\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * the new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
		rst_level = HNAE3_CORE_RESET;
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
#define RESET_UPGRADE_DELAY_SEC 10

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		if (is_timeout) {
			set_bit(hdev->reset_type, &hdev->reset_pending);
			dev_info(&hdev->pdev->dev,
				 "re-schedule to wait for hw reset done\n");
			return true;
		}

		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
		hclge_clear_reset_cause(hdev);
		mod_timer(&hdev->reset_timer,
			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);

		return false;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	bool is_timeout = false;
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev)) {
		is_timeout = true;
		goto err_reset;
	}

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	del_timer(&hdev->reset_timer);

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev, is_timeout))
		hclge_reset_task_schedule(hdev);
}

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one then we want to make sure we throttle
	 * the reset request. Therefore, we will not allow it again before
	 * 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(hdev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering global reset in reset timer\n");
	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

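/* The service task is kicked once per second by hclge_service_timer(), so
 * the per-tick counters below effectively count seconds: stats are
 * refreshed every HCLGE_STATS_TIMER_INTERVAL ticks and aRFS entries are
 * expired every HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL ticks.
 */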
static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_port_info(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
		hclge_rfs_filter_expire(hdev);
		hdev->fd_arfs_expire_timer = 0;
	}
	hclge_service_complete(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		/* vector 0 is reserved for the misc interrupt, so start
		 * scanning from vector 1
		 */
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

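/* The RSS hash key is longer than one command descriptor can carry, so it
 * is written in three chunks: two of HCLGE_RSS_HASH_KEY_NUM bytes and a
 * final chunk holding the remainder, with the chunk index carried in the
 * hash_config field of each descriptor.
 */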
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

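/* Translate the ethtool RXH_* flags into the hardware tuple bits, e.g.
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 becomes
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT;
 * SCTP flows additionally hash on the verification tag.
 */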
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

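/* tc_size written to hardware is the log2 of the per-TC queue count
 * rounded up to a power of two: e.g. rss_size = 24 gives
 * roundup_pow_of_two(24) = 32 and tc_size = ilog2(32) = 5, while the real
 * queue spread stays bounded by the indirection table contents.
 */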
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	/* spread the queues evenly: entry i maps to queue i modulo the
	 * vport's rss size
	 */
	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

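/* A single command descriptor can map at most HCLGE_VECTOR_ELEMENTS_PER_CMD
 * rings to a vector, so the ring chain is walked in batches: whenever the
 * descriptor fills up it is sent and a fresh one is started, and any
 * remainder is flushed after the loop.
 */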
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision(0x20); newer revisions support them. Setting these
	 * two fields does not return an error when the driver sends the
	 * command to firmware on revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, vlan filter is
	 * always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

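/* A full TCAM key is wider than one command descriptor, so the key bytes
 * are split across three chained descriptors (HCLGE_CMD_FLAG_NEXT links
 * them together) and written with a single hclge_cmd_send() call.
 */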
4432 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4433 int loc, u8 *key, bool is_add)
4435 struct hclge_fd_tcam_config_1_cmd *req1;
4436 struct hclge_fd_tcam_config_2_cmd *req2;
4437 struct hclge_fd_tcam_config_3_cmd *req3;
4438 struct hclge_desc desc[3];
4441 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4442 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4443 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4444 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4445 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4447 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4448 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4449 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4451 req1->stage = stage;
4452 req1->xy_sel = sel_x ? 1 : 0;
4453 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4454 req1->index = cpu_to_le32(loc);
4455 req1->entry_vld = sel_x ? is_add : 0;
4458 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4459 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4460 sizeof(req2->tcam_data));
4461 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4462 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4465 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4467 dev_err(&hdev->pdev->dev,
4468 "config tcam key fail, ret=%d\n",
4474 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4475 struct hclge_fd_ad_data *action)
4477 struct hclge_fd_ad_config_cmd *req;
4478 struct hclge_desc desc;
4482 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4484 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4485 req->index = cpu_to_le32(loc);
4488 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4489 action->write_rule_id_to_bd);
4490 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4493 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4494 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4495 action->forward_to_direct_queue);
4496 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4498 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4499 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4500 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4501 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4502 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4503 action->next_input_key);
4505 req->ad_data = cpu_to_le64(ad_data);
4506 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4508 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
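/* Convert one tuple of the rule into the TCAM x/y key format at
 * key_x/key_y. Tuples the rule marks as unused are skipped, leaving
 * their key bits zero so they match any value.
 */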
4513 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4514 struct hclge_fd_rule *rule)
4516 u16 tmp_x_s, tmp_y_s;
4517 u32 tmp_x_l, tmp_y_l;
4520 if (rule->unused_tuple & tuple_bit)
4523 switch (tuple_bit) {
4526 case BIT(INNER_DST_MAC):
4527 for (i = 0; i < 6; i++) {
4528 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4529 rule->tuples_mask.dst_mac[i]);
4530 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4531 rule->tuples_mask.dst_mac[i]);
4535 case BIT(INNER_SRC_MAC):
4536 for (i = 0; i < 6; i++) {
4537 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4538 rule->tuples_mask.src_mac[i]);
4539 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4540 rule->tuples_mask.src_mac[i]);
4544 case BIT(INNER_VLAN_TAG_FST):
4545 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4546 rule->tuples_mask.vlan_tag1);
4547 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4548 rule->tuples_mask.vlan_tag1);
4549 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4550 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4553 case BIT(INNER_ETH_TYPE):
4554 calc_x(tmp_x_s, rule->tuples.ether_proto,
4555 rule->tuples_mask.ether_proto);
4556 calc_y(tmp_y_s, rule->tuples.ether_proto,
4557 rule->tuples_mask.ether_proto);
4558 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4559 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4562 case BIT(INNER_IP_TOS):
4563 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4564 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4567 case BIT(INNER_IP_PROTO):
4568 calc_x(*key_x, rule->tuples.ip_proto,
4569 rule->tuples_mask.ip_proto);
4570 calc_y(*key_y, rule->tuples.ip_proto,
4571 rule->tuples_mask.ip_proto);
4574 case BIT(INNER_SRC_IP):
4575 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4576 rule->tuples_mask.src_ip[3]);
4577 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4578 rule->tuples_mask.src_ip[3]);
4579 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4580 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4583 case BIT(INNER_DST_IP):
4584 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4585 rule->tuples_mask.dst_ip[3]);
4586 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4587 rule->tuples_mask.dst_ip[3]);
4588 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4589 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4592 case BIT(INNER_SRC_PORT):
4593 calc_x(tmp_x_s, rule->tuples.src_port,
4594 rule->tuples_mask.src_port);
4595 calc_y(tmp_y_s, rule->tuples.src_port,
4596 rule->tuples_mask.src_port);
4597 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4598 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4601 case BIT(INNER_DST_PORT):
4602 calc_x(tmp_x_s, rule->tuples.dst_port,
4603 rule->tuples_mask.dst_port);
4604 calc_y(tmp_y_s, rule->tuples.dst_port,
4605 rule->tuples_mask.dst_port);
4606 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4607 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4615 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4616 u8 vf_id, u8 network_port_id)
4618 u32 port_number = 0;
4620 if (port_type == HOST_PORT) {
4621 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4623 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4625 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4627 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4628 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4629 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
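/* Pack the active meta data fields (packet type and destination vport
 * number) into the meta data region of the TCAM key, left-aligned to
 * the most significant bits.
 */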
4635 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4636 __le32 *key_x, __le32 *key_y,
4637 struct hclge_fd_rule *rule)
4639 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4640 u8 cur_pos = 0, tuple_size, shift_bits;
4643 for (i = 0; i < MAX_META_DATA; i++) {
4644 tuple_size = meta_data_key_info[i].key_length;
4645 tuple_bit = key_cfg->meta_data_active & BIT(i);
4647 switch (tuple_bit) {
4648 case BIT(ROCE_TYPE):
4649 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4650 cur_pos += tuple_size;
4652 case BIT(DST_VPORT):
4653 port_number = hclge_get_port_number(HOST_PORT, 0,
4655 hnae3_set_field(meta_data,
4656 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4657 cur_pos, port_number);
4658 cur_pos += tuple_size;
4665 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4666 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4667 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4669 *key_x = cpu_to_le32(tmp_x << shift_bits);
4670 *key_y = cpu_to_le32(tmp_y << shift_bits);
4673 /* A complete key consists of a meta data key and a tuple key.
4674 * The meta data key is stored in the MSB region and the tuple key in
4675 * the LSB region; unused bits are filled with 0.
4677 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4678 struct hclge_fd_rule *rule)
4680 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4681 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4682 u8 *cur_key_x, *cur_key_y;
4683 int i, ret, tuple_size;
4684 u8 meta_data_region;
4686 memset(key_x, 0, sizeof(key_x));
4687 memset(key_y, 0, sizeof(key_y));
4691 for (i = 0; i < MAX_TUPLE; i++) {
4695 tuple_size = tuple_key_info[i].key_length / 8;
4696 check_tuple = key_cfg->tuple_active & BIT(i);
4698 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4701 cur_key_x += tuple_size;
4702 cur_key_y += tuple_size;
4706 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4707 MAX_META_DATA_LENGTH / 8;
4709 hclge_fd_convert_meta_data(key_cfg,
4710 (__le32 *)(key_x + meta_data_region),
4711 (__le32 *)(key_y + meta_data_region),
4714 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4717 dev_err(&hdev->pdev->dev,
4718 "fd key_y config fail, loc=%d, ret=%d\n",
4719 rule->location, ret);
4723 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4726 dev_err(&hdev->pdev->dev,
4727 "fd key_x config fail, loc=%d, ret=%d\n",
4728 rule->location, ret);
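/* Translate the rule's action (drop, or forward to a specific queue)
 * into the action data written for the TCAM entry; the rule id is also
 * written back to the RX BD so a matched rule can be identified.
 */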
4732 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4733 struct hclge_fd_rule *rule)
4735 struct hclge_fd_ad_data ad_data;
4737 ad_data.ad_id = rule->location;
4739 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4740 ad_data.drop_packet = true;
4741 ad_data.forward_to_direct_queue = false;
4742 ad_data.queue_id = 0;
4744 ad_data.drop_packet = false;
4745 ad_data.forward_to_direct_queue = true;
4746 ad_data.queue_id = rule->queue_id;
4749 ad_data.use_counter = false;
4750 ad_data.counter_id = 0;
4752 ad_data.use_next_stage = false;
4753 ad_data.next_input_key = 0;
4755 ad_data.write_rule_id_to_bd = true;
4756 ad_data.rule_id = rule->location;
4758 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
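/* Validate an ethtool flow spec against the flow director capabilities
 * and collect the tuples the spec leaves unspecified in *unused.
 */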
4761 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4762 struct ethtool_rx_flow_spec *fs, u32 *unused)
4764 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4765 struct ethtool_usrip4_spec *usr_ip4_spec;
4766 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4767 struct ethtool_usrip6_spec *usr_ip6_spec;
4768 struct ethhdr *ether_spec;
4770 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4773 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4776 if ((fs->flow_type & FLOW_EXT) &&
4777 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4778 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4782 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4786 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4787 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4789 if (!tcp_ip4_spec->ip4src)
4790 *unused |= BIT(INNER_SRC_IP);
4792 if (!tcp_ip4_spec->ip4dst)
4793 *unused |= BIT(INNER_DST_IP);
4795 if (!tcp_ip4_spec->psrc)
4796 *unused |= BIT(INNER_SRC_PORT);
4798 if (!tcp_ip4_spec->pdst)
4799 *unused |= BIT(INNER_DST_PORT);
4801 if (!tcp_ip4_spec->tos)
4802 *unused |= BIT(INNER_IP_TOS);
4806 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4807 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4808 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4810 if (!usr_ip4_spec->ip4src)
4811 *unused |= BIT(INNER_SRC_IP);
4813 if (!usr_ip4_spec->ip4dst)
4814 *unused |= BIT(INNER_DST_IP);
4816 if (!usr_ip4_spec->tos)
4817 *unused |= BIT(INNER_IP_TOS);
4819 if (!usr_ip4_spec->proto)
4820 *unused |= BIT(INNER_IP_PROTO);
4822 if (usr_ip4_spec->l4_4_bytes)
4825 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4832 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4833 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4836 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4837 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4838 *unused |= BIT(INNER_SRC_IP);
4840 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4841 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4842 *unused |= BIT(INNER_DST_IP);
4844 if (!tcp_ip6_spec->psrc)
4845 *unused |= BIT(INNER_SRC_PORT);
4847 if (!tcp_ip6_spec->pdst)
4848 *unused |= BIT(INNER_DST_PORT);
4850 if (tcp_ip6_spec->tclass)
4854 case IPV6_USER_FLOW:
4855 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4856 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4857 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4858 BIT(INNER_DST_PORT);
4860 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4861 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4862 *unused |= BIT(INNER_SRC_IP);
4864 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4865 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4866 *unused |= BIT(INNER_DST_IP);
4868 if (!usr_ip6_spec->l4_proto)
4869 *unused |= BIT(INNER_IP_PROTO);
4871 if (usr_ip6_spec->tclass)
4874 if (usr_ip6_spec->l4_4_bytes)
4879 ether_spec = &fs->h_u.ether_spec;
4880 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4881 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4882 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4884 if (is_zero_ether_addr(ether_spec->h_source))
4885 *unused |= BIT(INNER_SRC_MAC);
4887 if (is_zero_ether_addr(ether_spec->h_dest))
4888 *unused |= BIT(INNER_DST_MAC);
4890 if (!ether_spec->h_proto)
4891 *unused |= BIT(INNER_ETH_TYPE);
4898 if ((fs->flow_type & FLOW_EXT)) {
4899 if (fs->h_ext.vlan_etype)
4901 if (!fs->h_ext.vlan_tci)
4902 *unused |= BIT(INNER_VLAN_TAG_FST);
4904 if (fs->m_ext.vlan_tci) {
4905 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4909 *unused |= BIT(INNER_VLAN_TAG_FST);
4912 if (fs->flow_type & FLOW_MAC_EXT) {
4913 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4916 if (is_zero_ether_addr(fs->h_ext.h_dest))
4917 *unused |= BIT(INNER_DST_MAC);
4919 *unused &= ~(BIT(INNER_DST_MAC));
4925 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4927 struct hclge_fd_rule *rule = NULL;
4928 struct hlist_node *node2;
4930 spin_lock_bh(&hdev->fd_rule_lock);
4931 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4932 if (rule->location >= location)
4936 spin_unlock_bh(&hdev->fd_rule_lock);
4938 return rule && rule->location == location;
4941 /* must be called while holding fd_rule_lock */
4942 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4943 struct hclge_fd_rule *new_rule,
4947 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4948 struct hlist_node *node2;
4950 if (is_add && !new_rule)
4953 hlist_for_each_entry_safe(rule, node2,
4954 &hdev->fd_rule_list, rule_node) {
4955 if (rule->location >= location)
4960 if (rule && rule->location == location) {
4961 hlist_del(&rule->rule_node);
4963 hdev->hclge_fd_rule_num--;
4966 if (!hdev->hclge_fd_rule_num)
4967 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4968 clear_bit(location, hdev->fd_bmap);
4972 } else if (!is_add) {
4973 dev_err(&hdev->pdev->dev,
4974 "delete fail, rule %d is inexistent\n",
4979 INIT_HLIST_NODE(&new_rule->rule_node);
4982 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4984 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4986 set_bit(location, hdev->fd_bmap);
4987 hdev->hclge_fd_rule_num++;
4988 hdev->fd_active_type = new_rule->rule_type;
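/* Fill the rule's tuples and masks from the ethtool flow spec, including
 * the ether proto and ip proto values implied by the flow type.
 */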
4993 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4994 struct ethtool_rx_flow_spec *fs,
4995 struct hclge_fd_rule *rule)
4997 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4999 switch (flow_type) {
5003 rule->tuples.src_ip[3] =
5004 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5005 rule->tuples_mask.src_ip[3] =
5006 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5008 rule->tuples.dst_ip[3] =
5009 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5010 rule->tuples_mask.dst_ip[3] =
5011 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5013 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5014 rule->tuples_mask.src_port =
5015 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5017 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5018 rule->tuples_mask.dst_port =
5019 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5021 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5022 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5024 rule->tuples.ether_proto = ETH_P_IP;
5025 rule->tuples_mask.ether_proto = 0xFFFF;
5029 rule->tuples.src_ip[3] =
5030 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5031 rule->tuples_mask.src_ip[3] =
5032 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5034 rule->tuples.dst_ip[3] =
5035 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5036 rule->tuples_mask.dst_ip[3] =
5037 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5039 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5040 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5042 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5043 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5045 rule->tuples.ether_proto = ETH_P_IP;
5046 rule->tuples_mask.ether_proto = 0xFFFF;
5052 be32_to_cpu_array(rule->tuples.src_ip,
5053 fs->h_u.tcp_ip6_spec.ip6src, 4);
5054 be32_to_cpu_array(rule->tuples_mask.src_ip,
5055 fs->m_u.tcp_ip6_spec.ip6src, 4);
5057 be32_to_cpu_array(rule->tuples.dst_ip,
5058 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5059 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5060 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5062 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5063 rule->tuples_mask.src_port =
5064 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5066 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5067 rule->tuples_mask.dst_port =
5068 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5070 rule->tuples.ether_proto = ETH_P_IPV6;
5071 rule->tuples_mask.ether_proto = 0xFFFF;
5074 case IPV6_USER_FLOW:
5075 be32_to_cpu_array(rule->tuples.src_ip,
5076 fs->h_u.usr_ip6_spec.ip6src, 4);
5077 be32_to_cpu_array(rule->tuples_mask.src_ip,
5078 fs->m_u.usr_ip6_spec.ip6src, 4);
5080 be32_to_cpu_array(rule->tuples.dst_ip,
5081 fs->h_u.usr_ip6_spec.ip6dst, 4);
5082 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5083 fs->m_u.usr_ip6_spec.ip6dst, 4);
5085 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5086 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5088 rule->tuples.ether_proto = ETH_P_IPV6;
5089 rule->tuples_mask.ether_proto = 0xFFFF;
5093 ether_addr_copy(rule->tuples.src_mac,
5094 fs->h_u.ether_spec.h_source);
5095 ether_addr_copy(rule->tuples_mask.src_mac,
5096 fs->m_u.ether_spec.h_source);
5098 ether_addr_copy(rule->tuples.dst_mac,
5099 fs->h_u.ether_spec.h_dest);
5100 ether_addr_copy(rule->tuples_mask.dst_mac,
5101 fs->m_u.ether_spec.h_dest);
5103 rule->tuples.ether_proto =
5104 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5105 rule->tuples_mask.ether_proto =
5106 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5113 switch (flow_type) {
5116 rule->tuples.ip_proto = IPPROTO_SCTP;
5117 rule->tuples_mask.ip_proto = 0xFF;
5121 rule->tuples.ip_proto = IPPROTO_TCP;
5122 rule->tuples_mask.ip_proto = 0xFF;
5126 rule->tuples.ip_proto = IPPROTO_UDP;
5127 rule->tuples_mask.ip_proto = 0xFF;
5133 if ((fs->flow_type & FLOW_EXT)) {
5134 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5135 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5138 if (fs->flow_type & FLOW_MAC_EXT) {
5139 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5140 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5146 /* must be called while holding fd_rule_lock */
5147 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5148 struct hclge_fd_rule *rule)
5153 dev_err(&hdev->pdev->dev,
5154 "The flow director rule is NULL\n");
5158 /* this never fails here, so there is no need to check the return value */
5159 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5161 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5165 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5172 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
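/* Entry point for adding a flow director rule via ethtool: validate the
 * spec, resolve the destination vport and queue, then program the key
 * and action to hardware.
 */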
5176 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5177 struct ethtool_rxnfc *cmd)
5179 struct hclge_vport *vport = hclge_get_vport(handle);
5180 struct hclge_dev *hdev = vport->back;
5181 u16 dst_vport_id = 0, q_index = 0;
5182 struct ethtool_rx_flow_spec *fs;
5183 struct hclge_fd_rule *rule;
5188 if (!hnae3_dev_fd_supported(hdev))
5192 dev_warn(&hdev->pdev->dev,
5193 "Please enable flow director first\n");
5197 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5199 ret = hclge_fd_check_spec(hdev, fs, &unused);
5201 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5205 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5206 action = HCLGE_FD_ACTION_DROP_PACKET;
5208 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5209 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5212 if (vf > hdev->num_req_vfs) {
5213 dev_err(&hdev->pdev->dev,
5214 "Error: vf id (%d) > max vf num (%d)\n",
5215 vf, hdev->num_req_vfs);
5219 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5220 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5223 dev_err(&hdev->pdev->dev,
5224 "Error: queue id (%d) > max tqp num (%d)\n",
5229 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5233 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5237 ret = hclge_fd_get_tuple(hdev, fs, rule);
5243 rule->flow_type = fs->flow_type;
5245 rule->location = fs->location;
5246 rule->unused_tuple = unused;
5247 rule->vf_id = dst_vport_id;
5248 rule->queue_id = q_index;
5249 rule->action = action;
5250 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5252 /* to avoid rule conflicts, clear all arfs rules when the user
5253 * configures a rule via ethtool
5255 hclge_clear_arfs_rules(handle);
5257 spin_lock_bh(&hdev->fd_rule_lock);
5258 ret = hclge_fd_config_rule(hdev, rule);
5260 spin_unlock_bh(&hdev->fd_rule_lock);
5265 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5266 struct ethtool_rxnfc *cmd)
5268 struct hclge_vport *vport = hclge_get_vport(handle);
5269 struct hclge_dev *hdev = vport->back;
5270 struct ethtool_rx_flow_spec *fs;
5273 if (!hnae3_dev_fd_supported(hdev))
5276 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5278 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5281 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5282 dev_err(&hdev->pdev->dev,
5283 "Delete fail, rule %d is inexistent\n",
5288 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5289 fs->location, NULL, false);
5293 spin_lock_bh(&hdev->fd_rule_lock);
5294 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5296 spin_unlock_bh(&hdev->fd_rule_lock);
5301 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5304 struct hclge_vport *vport = hclge_get_vport(handle);
5305 struct hclge_dev *hdev = vport->back;
5306 struct hclge_fd_rule *rule;
5307 struct hlist_node *node;
5310 if (!hnae3_dev_fd_supported(hdev))
5313 spin_lock_bh(&hdev->fd_rule_lock);
5314 for_each_set_bit(location, hdev->fd_bmap,
5315 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5316 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5320 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5322 hlist_del(&rule->rule_node);
5325 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5326 hdev->hclge_fd_rule_num = 0;
5327 bitmap_zero(hdev->fd_bmap,
5328 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5331 spin_unlock_bh(&hdev->fd_rule_lock);
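/* Re-program every rule in the rule list to hardware, typically after a
 * reset has cleared the TCAM; rules that fail to restore are removed.
 */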
5334 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5336 struct hclge_vport *vport = hclge_get_vport(handle);
5337 struct hclge_dev *hdev = vport->back;
5338 struct hclge_fd_rule *rule;
5339 struct hlist_node *node;
5342 /* Return ok here, because reset error handling will check this
5343 * return value. If error is returned here, the reset process will fail.
5346 if (!hnae3_dev_fd_supported(hdev))
5349 /* if fd is disabled, the rules should not be restored during reset */
5353 spin_lock_bh(&hdev->fd_rule_lock);
5354 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5355 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5357 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5360 dev_warn(&hdev->pdev->dev,
5361 "Restore rule %d failed, remove it\n",
5363 clear_bit(rule->location, hdev->fd_bmap);
5364 hlist_del(&rule->rule_node);
5366 hdev->hclge_fd_rule_num--;
5370 if (hdev->hclge_fd_rule_num)
5371 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5373 spin_unlock_bh(&hdev->fd_rule_lock);
5378 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5379 struct ethtool_rxnfc *cmd)
5381 struct hclge_vport *vport = hclge_get_vport(handle);
5382 struct hclge_dev *hdev = vport->back;
5384 if (!hnae3_dev_fd_supported(hdev))
5387 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5388 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5393 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5394 struct ethtool_rxnfc *cmd)
5396 struct hclge_vport *vport = hclge_get_vport(handle);
5397 struct hclge_fd_rule *rule = NULL;
5398 struct hclge_dev *hdev = vport->back;
5399 struct ethtool_rx_flow_spec *fs;
5400 struct hlist_node *node2;
5402 if (!hnae3_dev_fd_supported(hdev))
5405 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5407 spin_lock_bh(&hdev->fd_rule_lock);
5409 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5410 if (rule->location >= fs->location)
5414 if (!rule || fs->location != rule->location) {
5415 spin_unlock_bh(&hdev->fd_rule_lock);
5420 fs->flow_type = rule->flow_type;
5421 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5425 fs->h_u.tcp_ip4_spec.ip4src =
5426 cpu_to_be32(rule->tuples.src_ip[3]);
5427 fs->m_u.tcp_ip4_spec.ip4src =
5428 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5429 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5431 fs->h_u.tcp_ip4_spec.ip4dst =
5432 cpu_to_be32(rule->tuples.dst_ip[3]);
5433 fs->m_u.tcp_ip4_spec.ip4dst =
5434 rule->unused_tuple & BIT(INNER_DST_IP) ?
5435 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5437 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5438 fs->m_u.tcp_ip4_spec.psrc =
5439 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5440 0 : cpu_to_be16(rule->tuples_mask.src_port);
5442 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5443 fs->m_u.tcp_ip4_spec.pdst =
5444 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5445 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5447 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5448 fs->m_u.tcp_ip4_spec.tos =
5449 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5450 0 : rule->tuples_mask.ip_tos;
5454 fs->h_u.usr_ip4_spec.ip4src =
5455 cpu_to_be32(rule->tuples.src_ip[3]);
5456 fs->m_u.usr_ip4_spec.ip4src =
5457 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5458 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5460 fs->h_u.usr_ip4_spec.ip4dst =
5461 cpu_to_be32(rule->tuples.dst_ip[3]);
5462 fs->m_u.usr_ip4_spec.ip4dst =
5463 rule->unused_tuple & BIT(INNER_DST_IP) ?
5464 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5466 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5467 fs->m_u.usr_ip4_spec.tos =
5468 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5469 0 : rule->tuples_mask.ip_tos;
5471 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5472 fs->m_u.usr_ip4_spec.proto =
5473 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5474 0 : rule->tuples_mask.ip_proto;
5476 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5482 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5483 rule->tuples.src_ip, 4);
5484 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5485 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5487 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5488 rule->tuples_mask.src_ip, 4);
5490 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5491 rule->tuples.dst_ip, 4);
5492 if (rule->unused_tuple & BIT(INNER_DST_IP))
5493 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5495 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5496 rule->tuples_mask.dst_ip, 4);
5498 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5499 fs->m_u.tcp_ip6_spec.psrc =
5500 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5501 0 : cpu_to_be16(rule->tuples_mask.src_port);
5503 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5504 fs->m_u.tcp_ip6_spec.pdst =
5505 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5506 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5509 case IPV6_USER_FLOW:
5510 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5511 rule->tuples.src_ip, 4);
5512 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5513 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5515 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5516 rule->tuples_mask.src_ip, 4);
5518 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5519 rule->tuples.dst_ip, 4);
5520 if (rule->unused_tuple & BIT(INNER_DST_IP))
5521 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5523 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5524 rule->tuples_mask.dst_ip, 4);
5526 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5527 fs->m_u.usr_ip6_spec.l4_proto =
5528 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5529 0 : rule->tuples_mask.ip_proto;
5533 ether_addr_copy(fs->h_u.ether_spec.h_source,
5534 rule->tuples.src_mac);
5535 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5536 eth_zero_addr(fs->m_u.ether_spec.h_source);
5538 ether_addr_copy(fs->m_u.ether_spec.h_source,
5539 rule->tuples_mask.src_mac);
5541 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5542 rule->tuples.dst_mac);
5543 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5544 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5546 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5547 rule->tuples_mask.dst_mac);
5549 fs->h_u.ether_spec.h_proto =
5550 cpu_to_be16(rule->tuples.ether_proto);
5551 fs->m_u.ether_spec.h_proto =
5552 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5553 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5557 spin_unlock_bh(&hdev->fd_rule_lock);
5561 if (fs->flow_type & FLOW_EXT) {
5562 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5563 fs->m_ext.vlan_tci =
5564 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5565 cpu_to_be16(VLAN_VID_MASK) :
5566 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5569 if (fs->flow_type & FLOW_MAC_EXT) {
5570 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5571 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5572 eth_zero_addr(fs->m_ext.h_dest);
5574 ether_addr_copy(fs->m_ext.h_dest,
5575 rule->tuples_mask.dst_mac);
5578 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5579 fs->ring_cookie = RX_CLS_FLOW_DISC;
5583 fs->ring_cookie = rule->queue_id;
5584 vf_id = rule->vf_id;
5585 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5586 fs->ring_cookie |= vf_id;
5589 spin_unlock_bh(&hdev->fd_rule_lock);
5594 static int hclge_get_all_rules(struct hnae3_handle *handle,
5595 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5597 struct hclge_vport *vport = hclge_get_vport(handle);
5598 struct hclge_dev *hdev = vport->back;
5599 struct hclge_fd_rule *rule;
5600 struct hlist_node *node2;
5603 if (!hnae3_dev_fd_supported(hdev))
5606 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5608 spin_lock_bh(&hdev->fd_rule_lock);
5609 hlist_for_each_entry_safe(rule, node2,
5610 &hdev->fd_rule_list, rule_node) {
5611 if (cnt == cmd->rule_cnt) {
5612 spin_unlock_bh(&hdev->fd_rule_lock);
5616 rule_locs[cnt] = rule->location;
5620 spin_unlock_bh(&hdev->fd_rule_lock);
5622 cmd->rule_cnt = cnt;
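/* Extract the flow director tuples from the dissected flow keys of a
 * received packet (aRFS path).
 */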
5627 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5628 struct hclge_fd_rule_tuples *tuples)
5630 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5631 tuples->ip_proto = fkeys->basic.ip_proto;
5632 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5634 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5635 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5636 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5638 memcpy(tuples->src_ip,
5639 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5640 sizeof(tuples->src_ip));
5641 memcpy(tuples->dst_ip,
5642 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5643 sizeof(tuples->dst_ip));
5647 /* traverse all rules, check whether an existing rule has the same tuples */
5648 static struct hclge_fd_rule *
5649 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5650 const struct hclge_fd_rule_tuples *tuples)
5652 struct hclge_fd_rule *rule = NULL;
5653 struct hlist_node *node;
5655 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5656 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5663 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5664 struct hclge_fd_rule *rule)
5666 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5667 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5668 BIT(INNER_SRC_PORT);
5671 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5672 if (tuples->ether_proto == ETH_P_IP) {
5673 if (tuples->ip_proto == IPPROTO_TCP)
5674 rule->flow_type = TCP_V4_FLOW;
5676 rule->flow_type = UDP_V4_FLOW;
5678 if (tuples->ip_proto == IPPROTO_TCP)
5679 rule->flow_type = TCP_V6_FLOW;
5681 rule->flow_type = UDP_V6_FLOW;
5683 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5684 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
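/* aRFS entry point: steer the given flow to queue_id, either by reusing
 * an existing rule with matching tuples or by allocating a new rule in
 * a free TCAM location.
 */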
5687 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5688 u16 flow_id, struct flow_keys *fkeys)
5690 struct hclge_vport *vport = hclge_get_vport(handle);
5691 struct hclge_fd_rule_tuples new_tuples;
5692 struct hclge_dev *hdev = vport->back;
5693 struct hclge_fd_rule *rule;
5698 if (!hnae3_dev_fd_supported(hdev))
5701 memset(&new_tuples, 0, sizeof(new_tuples));
5702 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5704 spin_lock_bh(&hdev->fd_rule_lock);
5706 /* when an fd rule added by the user already exists,
5707 * arfs should not work
5709 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5710 spin_unlock_bh(&hdev->fd_rule_lock);
5715 /* check whether a flow director filter exists for this flow;
5716 * if not, create a new filter for it;
5717 * if a filter exists with a different queue id, modify the filter;
5718 * if a filter exists with the same queue id, do nothing
5720 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5722 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5723 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5724 spin_unlock_bh(&hdev->fd_rule_lock);
5729 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5731 spin_unlock_bh(&hdev->fd_rule_lock);
5736 set_bit(bit_id, hdev->fd_bmap);
5737 rule->location = bit_id;
5738 rule->flow_id = flow_id;
5739 rule->queue_id = queue_id;
5740 hclge_fd_build_arfs_rule(&new_tuples, rule);
5741 ret = hclge_fd_config_rule(hdev, rule);
5743 spin_unlock_bh(&hdev->fd_rule_lock);
5748 return rule->location;
5751 spin_unlock_bh(&hdev->fd_rule_lock);
5753 if (rule->queue_id == queue_id)
5754 return rule->location;
5756 tmp_queue_id = rule->queue_id;
5757 rule->queue_id = queue_id;
5758 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5760 rule->queue_id = tmp_queue_id;
5764 return rule->location;
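/* Age out aRFS rules: entries whose flows the stack no longer tracks
 * (per rps_may_expire_flow()) are unlinked under the lock, then their
 * TCAM entries are invalidated.
 */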
5767 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5769 #ifdef CONFIG_RFS_ACCEL
5770 struct hnae3_handle *handle = &hdev->vport[0].nic;
5771 struct hclge_fd_rule *rule;
5772 struct hlist_node *node;
5773 HLIST_HEAD(del_list);
5775 spin_lock_bh(&hdev->fd_rule_lock);
5776 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5777 spin_unlock_bh(&hdev->fd_rule_lock);
5780 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5781 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5782 rule->flow_id, rule->location)) {
5783 hlist_del_init(&rule->rule_node);
5784 hlist_add_head(&rule->rule_node, &del_list);
5785 hdev->hclge_fd_rule_num--;
5786 clear_bit(rule->location, hdev->fd_bmap);
5789 spin_unlock_bh(&hdev->fd_rule_lock);
5791 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5792 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5793 rule->location, NULL, false);
5799 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5801 #ifdef CONFIG_RFS_ACCEL
5802 struct hclge_vport *vport = hclge_get_vport(handle);
5803 struct hclge_dev *hdev = vport->back;
5805 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5806 hclge_del_all_fd_entries(handle, true);
5810 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5812 struct hclge_vport *vport = hclge_get_vport(handle);
5813 struct hclge_dev *hdev = vport->back;
5815 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5816 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5819 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5821 struct hclge_vport *vport = hclge_get_vport(handle);
5822 struct hclge_dev *hdev = vport->back;
5824 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5827 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5829 struct hclge_vport *vport = hclge_get_vport(handle);
5830 struct hclge_dev *hdev = vport->back;
5832 return hdev->rst_stats.hw_reset_done_cnt;
5835 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5837 struct hclge_vport *vport = hclge_get_vport(handle);
5838 struct hclge_dev *hdev = vport->back;
5841 hdev->fd_en = enable;
5842 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5844 hclge_del_all_fd_entries(handle, clear);
5846 hclge_restore_fd_entries(handle);
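/* Enable or disable the MAC: TX/RX, padding, FCS insertion/stripping and
 * oversize truncation are toggled together, while the loopback and 1588
 * bits are cleared.
 */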
5849 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5851 struct hclge_desc desc;
5852 struct hclge_config_mac_mode_cmd *req =
5853 (struct hclge_config_mac_mode_cmd *)desc.data;
5857 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5858 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5859 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5860 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5861 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5862 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5863 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5864 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5865 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5866 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5867 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5868 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5869 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5870 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5871 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5872 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5874 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5876 dev_err(&hdev->pdev->dev,
5877 "mac enable fail, ret =%d.\n", ret);
5880 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5882 struct hclge_config_mac_mode_cmd *req;
5883 struct hclge_desc desc;
5887 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5888 /* 1 Read out the MAC mode config at first */
5889 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5890 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5892 dev_err(&hdev->pdev->dev,
5893 "mac loopback get fail, ret =%d.\n", ret);
5897 /* 2 Then setup the loopback flag */
5898 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5899 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5900 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5901 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5903 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5905 /* 3 Config mac work mode with loopback flag
5906 * and its original configure parameters
5908 hclge_cmd_reuse_desc(&desc, false);
5909 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5911 dev_err(&hdev->pdev->dev,
5912 "mac loopback set fail, ret =%d.\n", ret);
5916 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5917 enum hnae3_loop loop_mode)
5919 #define HCLGE_SERDES_RETRY_MS 10
5920 #define HCLGE_SERDES_RETRY_NUM 100
5922 #define HCLGE_MAC_LINK_STATUS_MS 10
5923 #define HCLGE_MAC_LINK_STATUS_NUM 100
5924 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5925 #define HCLGE_MAC_LINK_STATUS_UP 1
5927 struct hclge_serdes_lb_cmd *req;
5928 struct hclge_desc desc;
5929 int mac_link_ret = 0;
5933 req = (struct hclge_serdes_lb_cmd *)desc.data;
5934 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5936 switch (loop_mode) {
5937 case HNAE3_LOOP_SERIAL_SERDES:
5938 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5940 case HNAE3_LOOP_PARALLEL_SERDES:
5941 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5944 dev_err(&hdev->pdev->dev,
5945 "unsupported serdes loopback mode %d\n", loop_mode);
5950 req->enable = loop_mode_b;
5951 req->mask = loop_mode_b;
5952 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5954 req->mask = loop_mode_b;
5955 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5958 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5960 dev_err(&hdev->pdev->dev,
5961 "serdes loopback set fail, ret = %d\n", ret);
5966 msleep(HCLGE_SERDES_RETRY_MS);
5967 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5969 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5971 dev_err(&hdev->pdev->dev,
5972 "serdes loopback get, ret = %d\n", ret);
5975 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5976 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5978 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5979 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5981 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5982 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5986 hclge_cfg_mac_mode(hdev, en);
5990 /* serdes internal loopback, independent of the network cable */
5991 msleep(HCLGE_MAC_LINK_STATUS_MS);
5992 ret = hclge_get_mac_link_status(hdev);
5993 if (ret == mac_link_ret)
5995 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5997 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6002 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6003 int stream_id, bool enable)
6005 struct hclge_desc desc;
6006 struct hclge_cfg_com_tqp_queue_cmd *req =
6007 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6010 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6011 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6012 req->stream_id = cpu_to_le16(stream_id);
6013 req->enable |= enable << HCLGE_TQP_ENABLE_B;
6015 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6017 dev_err(&hdev->pdev->dev,
6018 "Tqp enable fail, status =%d.\n", ret);
6022 static int hclge_set_loopback(struct hnae3_handle *handle,
6023 enum hnae3_loop loop_mode, bool en)
6025 struct hclge_vport *vport = hclge_get_vport(handle);
6026 struct hnae3_knic_private_info *kinfo;
6027 struct hclge_dev *hdev = vport->back;
6030 switch (loop_mode) {
6031 case HNAE3_LOOP_APP:
6032 ret = hclge_set_app_loopback(hdev, en);
6034 case HNAE3_LOOP_SERIAL_SERDES:
6035 case HNAE3_LOOP_PARALLEL_SERDES:
6036 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6040 dev_err(&hdev->pdev->dev,
6041 "loop_mode %d is not supported\n", loop_mode);
6048 kinfo = &vport->nic.kinfo;
6049 for (i = 0; i < kinfo->num_tqps; i++) {
6050 ret = hclge_tqp_enable(hdev, i, 0, en);
6058 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6060 struct hclge_vport *vport = hclge_get_vport(handle);
6061 struct hnae3_knic_private_info *kinfo;
6062 struct hnae3_queue *queue;
6063 struct hclge_tqp *tqp;
6066 kinfo = &vport->nic.kinfo;
6067 for (i = 0; i < kinfo->num_tqps; i++) {
6068 queue = handle->kinfo.tqp[i];
6069 tqp = container_of(queue, struct hclge_tqp, q);
6070 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6074 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6076 struct hclge_vport *vport = hclge_get_vport(handle);
6077 struct hclge_dev *hdev = vport->back;
6080 mod_timer(&hdev->service_timer, jiffies + HZ);
6082 del_timer_sync(&hdev->service_timer);
6083 cancel_work_sync(&hdev->service_task);
6084 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6088 static int hclge_ae_start(struct hnae3_handle *handle)
6090 struct hclge_vport *vport = hclge_get_vport(handle);
6091 struct hclge_dev *hdev = vport->back;
6094 hclge_cfg_mac_mode(hdev, true);
6095 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6096 hdev->hw.mac.link = 0;
6098 /* reset tqp stats */
6099 hclge_reset_tqp_stats(handle);
6101 hclge_mac_start_phy(hdev);
6106 static void hclge_ae_stop(struct hnae3_handle *handle)
6108 struct hclge_vport *vport = hclge_get_vport(handle);
6109 struct hclge_dev *hdev = vport->back;
6112 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6114 hclge_clear_arfs_rules(handle);
6116 /* If it is not PF reset, the firmware will disable the MAC,
6117 * so it only needs to stop the phy here.
6119 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6120 hdev->reset_type != HNAE3_FUNC_RESET) {
6121 hclge_mac_stop_phy(hdev);
6125 for (i = 0; i < handle->kinfo.num_tqps; i++)
6126 hclge_reset_tqp(handle, i);
6129 hclge_cfg_mac_mode(hdev, false);
6131 hclge_mac_stop_phy(hdev);
6133 /* reset tqp stats */
6134 hclge_reset_tqp_stats(handle);
6135 hclge_update_link_status(hdev);
6138 int hclge_vport_start(struct hclge_vport *vport)
6140 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6141 vport->last_active_jiffies = jiffies;
6145 void hclge_vport_stop(struct hclge_vport *vport)
6147 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6150 static int hclge_client_start(struct hnae3_handle *handle)
6152 struct hclge_vport *vport = hclge_get_vport(handle);
6154 return hclge_vport_start(vport);
6157 static void hclge_client_stop(struct hnae3_handle *handle)
6159 struct hclge_vport *vport = hclge_get_vport(handle);
6161 hclge_vport_stop(vport);
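/* Map the cmdq response code of a mac_vlan table add/remove/lookup
 * operation to a Linux error code, logging overflow and miss cases.
 */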
6164 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6165 u16 cmdq_resp, u8 resp_code,
6166 enum hclge_mac_vlan_tbl_opcode op)
6168 struct hclge_dev *hdev = vport->back;
6169 int return_status = -EIO;
6172 dev_err(&hdev->pdev->dev,
6173 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6178 if (op == HCLGE_MAC_VLAN_ADD) {
6179 if ((!resp_code) || (resp_code == 1)) {
6181 } else if (resp_code == 2) {
6182 return_status = -ENOSPC;
6183 dev_err(&hdev->pdev->dev,
6184 "add mac addr failed for uc_overflow.\n");
6185 } else if (resp_code == 3) {
6186 return_status = -ENOSPC;
6187 dev_err(&hdev->pdev->dev,
6188 "add mac addr failed for mc_overflow.\n");
6190 dev_err(&hdev->pdev->dev,
6191 "add mac addr failed for undefined, code=%d.\n",
6194 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6197 } else if (resp_code == 1) {
6198 return_status = -ENOENT;
6199 dev_dbg(&hdev->pdev->dev,
6200 "remove mac addr failed for miss.\n");
6202 dev_err(&hdev->pdev->dev,
6203 "remove mac addr failed for undefined, code=%d.\n",
6206 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6209 } else if (resp_code == 1) {
6210 return_status = -ENOENT;
6211 dev_dbg(&hdev->pdev->dev,
6212 "lookup mac addr failed for miss.\n");
6214 dev_err(&hdev->pdev->dev,
6215 "lookup mac addr failed for undefined, code=%d.\n",
6219 return_status = -EINVAL;
6220 dev_err(&hdev->pdev->dev,
6221 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6225 return return_status;
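/* Set or clear the function bit for vfid in a mac_vlan table entry's
 * function bitmap: functions 0-191 live in desc[1], 192-255 in desc[2].
 */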
6228 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6233 if (vfid > 255 || vfid < 0)
6236 if (vfid >= 0 && vfid <= 191) {
6237 word_num = vfid / 32;
6238 bit_num = vfid % 32;
6240 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6242 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6244 word_num = (vfid - 192) / 32;
6245 bit_num = vfid % 32;
6247 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6249 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6255 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6257 #define HCLGE_DESC_NUMBER 3
6258 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6261 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6262 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6263 if (desc[i].data[j])
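/* Fill a mac_vlan table entry with the MAC address, packing bytes 0-3
 * into mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, and flag the
 * entry as multicast when requested.
 */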
6269 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6270 const u8 *addr, bool is_mc)
6272 const unsigned char *mac_addr = addr;
6273 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6274 (mac_addr[0]) | (mac_addr[1] << 8);
6275 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6277 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6279 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6280 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6283 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6284 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6287 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6288 struct hclge_mac_vlan_tbl_entry_cmd *req)
6290 struct hclge_dev *hdev = vport->back;
6291 struct hclge_desc desc;
6296 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6298 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6300 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6302 dev_err(&hdev->pdev->dev,
6303 "del mac addr failed for cmd_send, ret =%d.\n",
6307 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6308 retval = le16_to_cpu(desc.retval);
6310 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6311 HCLGE_MAC_VLAN_REMOVE);
6314 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6315 struct hclge_mac_vlan_tbl_entry_cmd *req,
6316 struct hclge_desc *desc,
6319 struct hclge_dev *hdev = vport->back;
6324 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6326 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6327 memcpy(desc[0].data,
6329 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6330 hclge_cmd_setup_basic_desc(&desc[1],
6331 HCLGE_OPC_MAC_VLAN_ADD,
6333 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6334 hclge_cmd_setup_basic_desc(&desc[2],
6335 HCLGE_OPC_MAC_VLAN_ADD,
6337 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6339 memcpy(desc[0].data,
6341 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6342 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6345 dev_err(&hdev->pdev->dev,
6346 "lookup mac addr failed for cmd_send, ret =%d.\n",
6350 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6351 retval = le16_to_cpu(desc[0].retval);
6353 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6354 HCLGE_MAC_VLAN_LKUP);
6357 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6358 struct hclge_mac_vlan_tbl_entry_cmd *req,
6359 struct hclge_desc *mc_desc)
6361 struct hclge_dev *hdev = vport->back;
6368 struct hclge_desc desc;
6370 hclge_cmd_setup_basic_desc(&desc,
6371 HCLGE_OPC_MAC_VLAN_ADD,
6373 memcpy(desc.data, req,
6374 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6375 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6376 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6377 retval = le16_to_cpu(desc.retval);
6379 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6381 HCLGE_MAC_VLAN_ADD);
6383 hclge_cmd_reuse_desc(&mc_desc[0], false);
6384 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6385 hclge_cmd_reuse_desc(&mc_desc[1], false);
6386 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6387 hclge_cmd_reuse_desc(&mc_desc[2], false);
6388 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6389 memcpy(mc_desc[0].data, req,
6390 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6391 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6392 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6393 retval = le16_to_cpu(mc_desc[0].retval);
6395 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6397 HCLGE_MAC_VLAN_ADD);
6401 dev_err(&hdev->pdev->dev,
6402 "add mac addr failed for cmd_send, ret =%d.\n",
6410 static int hclge_init_umv_space(struct hclge_dev *hdev)
6412 u16 allocated_size = 0;
6415 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6420 if (allocated_size < hdev->wanted_umv_size)
6421 dev_warn(&hdev->pdev->dev,
6422 "Alloc umv space failed, want %d, get %d\n",
6423 hdev->wanted_umv_size, allocated_size);
6425 mutex_init(&hdev->umv_mutex);
6426 hdev->max_umv_size = allocated_size;
6427 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6428 hdev->share_umv_size = hdev->priv_umv_size +
6429 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6434 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6438 if (hdev->max_umv_size > 0) {
6439 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6443 hdev->max_umv_size = 0;
6445 mutex_destroy(&hdev->umv_mutex);
6450 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6451 u16 *allocated_size, bool is_alloc)
6453 struct hclge_umv_spc_alc_cmd *req;
6454 struct hclge_desc desc;
6457 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6458 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6459 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6460 req->space_size = cpu_to_le32(space_size);
6462 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6464 dev_err(&hdev->pdev->dev,
6465 "%s umv space failed for cmd_send, ret =%d\n",
6466 is_alloc ? "allocate" : "free", ret);
6470 if (is_alloc && allocated_size)
6471 *allocated_size = le32_to_cpu(desc.data[1]);
6476 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6478 struct hclge_vport *vport;
6481 for (i = 0; i < hdev->num_alloc_vport; i++) {
6482 vport = &hdev->vport[i];
6483 vport->used_umv_num = 0;
6486 mutex_lock(&hdev->umv_mutex);
6487 hdev->share_umv_size = hdev->priv_umv_size +
6488 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6489 mutex_unlock(&hdev->umv_mutex);
6492 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6494 struct hclge_dev *hdev = vport->back;
6497 mutex_lock(&hdev->umv_mutex);
6498 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6499 hdev->share_umv_size == 0);
6500 mutex_unlock(&hdev->umv_mutex);
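/* Charge or refund one unicast MAC entry: a vport consumes its private
 * UMV quota first and then the shared pool; is_free reverses the
 * accounting.
 */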
6505 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6507 struct hclge_dev *hdev = vport->back;
6509 mutex_lock(&hdev->umv_mutex);
6511 if (vport->used_umv_num > hdev->priv_umv_size)
6512 hdev->share_umv_size++;
6514 if (vport->used_umv_num > 0)
6515 vport->used_umv_num--;
6517 if (vport->used_umv_num >= hdev->priv_umv_size &&
6518 hdev->share_umv_size > 0)
6519 hdev->share_umv_size--;
6520 vport->used_umv_num++;
6522 mutex_unlock(&hdev->umv_mutex);
6525 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6526 const unsigned char *addr)
6528 struct hclge_vport *vport = hclge_get_vport(handle);
6530 return hclge_add_uc_addr_common(vport, addr);
6533 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6534 const unsigned char *addr)
6536 struct hclge_dev *hdev = vport->back;
6537 struct hclge_mac_vlan_tbl_entry_cmd req;
6538 struct hclge_desc desc;
6539 u16 egress_port = 0;
6542 /* mac addr check */
6543 if (is_zero_ether_addr(addr) ||
6544 is_broadcast_ether_addr(addr) ||
6545 is_multicast_ether_addr(addr)) {
6546 dev_err(&hdev->pdev->dev,
6547 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6549 is_zero_ether_addr(addr),
6550 is_broadcast_ether_addr(addr),
6551 is_multicast_ether_addr(addr));
6555 memset(&req, 0, sizeof(req));
6557 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6558 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6560 req.egress_port = cpu_to_le16(egress_port);
6562 hclge_prepare_mac_addr(&req, addr, false);
6564 /* Look up the mac address in the mac_vlan table, and add
6565 * it if the entry does not exist. Duplicate unicast entries
6566 * are not allowed in the mac vlan table.
6568 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6569 if (ret == -ENOENT) {
6570 if (!hclge_is_umv_space_full(vport)) {
6571 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6573 hclge_update_umv_space(vport, false);
6577 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6578 hdev->priv_umv_size);
6583 /* check if we just hit a duplicate entry */
6585 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6586 vport->vport_id, addr);
6590 dev_err(&hdev->pdev->dev,
6591 "PF failed to add unicast entry(%pM) in the MAC table\n",
6597 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6598 const unsigned char *addr)
6600 struct hclge_vport *vport = hclge_get_vport(handle);
6602 return hclge_rm_uc_addr_common(vport, addr);
6605 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6606 const unsigned char *addr)
6608 struct hclge_dev *hdev = vport->back;
6609 struct hclge_mac_vlan_tbl_entry_cmd req;
6612 /* mac addr check */
6613 if (is_zero_ether_addr(addr) ||
6614 is_broadcast_ether_addr(addr) ||
6615 is_multicast_ether_addr(addr)) {
6616 dev_dbg(&hdev->pdev->dev,
6617 "Remove mac err! invalid mac:%pM.\n",
6622 memset(&req, 0, sizeof(req));
6623 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6624 hclge_prepare_mac_addr(&req, addr, false);
6625 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6627 hclge_update_umv_space(vport, true);
6632 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6633 const unsigned char *addr)
6635 struct hclge_vport *vport = hclge_get_vport(handle);
6637 return hclge_add_mc_addr_common(vport, addr);
6640 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6641 const unsigned char *addr)
6643 struct hclge_dev *hdev = vport->back;
6644 struct hclge_mac_vlan_tbl_entry_cmd req;
6645 struct hclge_desc desc[3];
6648 /* mac addr check */
6649 if (!is_multicast_ether_addr(addr)) {
6650 dev_err(&hdev->pdev->dev,
6651 "Add mc mac err! invalid mac:%pM.\n",
6655 memset(&req, 0, sizeof(req));
6656 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6657 hclge_prepare_mac_addr(&req, addr, true);
6658 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6660 /* This mac addr exists, update the VFID for it */
6661 hclge_update_desc_vfid(desc, vport->vport_id, false);
6662 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6664 /* This mac addr does not exist, add a new entry for it */
6665 memset(desc[0].data, 0, sizeof(desc[0].data));
6666 memset(desc[1].data, 0, sizeof(desc[0].data));
6667 memset(desc[2].data, 0, sizeof(desc[0].data));
6668 hclge_update_desc_vfid(desc, vport->vport_id, false);
6669 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6672 if (status == -ENOSPC)
6673 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6678 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6679 const unsigned char *addr)
6681 struct hclge_vport *vport = hclge_get_vport(handle);
6683 return hclge_rm_mc_addr_common(vport, addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in mta table, but it cannot be
		 * deleted here because an entry of mta represents an address
		 * range rather than a specific address. The delete action to
		 * all entries will take effect in update_mta_status called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}

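/* The helpers below maintain a per-vport software copy of the MAC addresses
 * pushed to the hardware MAC_VLAN table (uc_mac_list/mc_mac_list), so the PF
 * can drop or replay a VF's entries without the VF's involvement, e.g. on
 * VF removal or reset. hd_tbl_status tracks whether a given entry is
 * currently present in the hardware table.
 */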
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}

void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		/* byte-wise comparison; strncmp() would stop early at a
		 * zero byte inside the MAC address
		 */
		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}

void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

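/* For the ETHTYPE_ADD command the firmware appears to place its response
 * code in bits 15:8 of the first data word, hence the
 * "(le32_to_cpu(desc.data[0]) >> 8) & 0xff" extraction below, while the
 * generic command return value lives in desc.retval. The bit position is
 * taken from the extraction code itself, not from a documented layout.
 */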
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
		return -EINVAL;
	}

	if ((!is_first || is_kdump_kernel()) &&
	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)

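/* On HW revision 0x21 and later, egress filtering is configured per function
 * and ingress filtering per port, each with separate NIC/RoCE enable bits;
 * revision 0x20 only exposes the single V1 egress bit. This note mirrors
 * the two code paths below rather than any datasheet.
 */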
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}

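/* The VF VLAN command carries a 16-byte-per-descriptor bitmap of functions,
 * so two descriptors cover all of them. Worked example: for vfid = 10,
 * vf_byte_off = 10 / 8 = 1 and vf_byte_val = 1 << (10 % 8) = 0x04, i.e.
 * bit 2 of byte 1 in the first descriptor's bitmap. Only vfids whose byte
 * offset reaches HCLGE_MAX_VF_BYTES (16) spill into the second descriptor.
 */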
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
			dev_warn(&hdev->pdev->dev,
				 "vlan %d filter is not in vf vlan table\n",
				 vlan);
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}

static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}

static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision 0x20; newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}

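/* Decision table implemented below (current port-based VLAN state vs. the
 * requested vlan argument):
 *
 *   state    vlan == 0   vlan != 0
 *   DISABLE  NOCHANGE    ENABLE
 *   ENABLE   DISABLE     NOCHANGE if the tag is unchanged, otherwise MODIFY
 */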
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3-bit value, so it cannot be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id,
							state, vlan, qos,
							ntohs(proto));
		return ret;
	}
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case, we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing one;
	 * we just update the vport VLAN list. The VLAN IDs in the VLAN list
	 * won't be written to the VLAN filter table until port based VLAN
	 * is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (ret)
		return ret;

	if (is_kill)
		hclge_rm_vport_vlan_table(vport, vlan_id, false);
	else
		hclge_add_vport_vlan_table(vport, vlan_id,
					   writen_to_tbl);

	return 0;
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

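/* The MTU is converted to a maximum frame size before being checked against
 * the MAC limits. Worked example with the standard header lengths
 * (ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4): new_mtu = 1500 gives
 * max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526 bytes, i.e. room for a
 * double-tagged frame.
 */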
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

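/* TQP reset below follows a four-step handshake: disable the queue, assert
 * the reset request via HCLGE_OPC_RESET_TQP_QUEUE, poll the ready_to_reset
 * status until the hardware reports completion (up to
 * HCLGE_TQP_RESET_TRY_TIMES polls), then deassert the request.
 */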
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}

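/* Autonegotiated pause settings are resolved the standard 802.3 way: the
 * local advertisement is derived from phydev->advertising, the link
 * partner's from its pause/asym_pause bits, and mii_resolve_flowctrl_fdx()
 * picks the resulting TX/RX pause enables. Half-duplex links get pause
 * disabled entirely.
 */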
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	return hclge_restart_autoneg(handle);
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);
			set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);

			if (netif_msg_drv(&hdev->vport->nic))
				hclge_info_show(hdev);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				set_bit(HCLGE_STATE_ROCE_REGISTERED,
					&hdev->state);
				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				set_bit(HCLGE_STATE_ROCE_REGISTERED,
					&hdev->state);
				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_config_mac_tnl_int(hdev, false);
	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

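/* Changing the channel count remaps TQPs to TCs and, unless the user has
 * pinned their own table (rxfh_configured), rebuilds the RSS indirection
 * table round-robin: entry i steers to queue i % rss_size, which spreads
 * flows evenly across the new queue set.
 */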
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

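/* When reading registers from firmware, the first descriptor's data area
 * also seems to carry a small header, so only
 * HCLGE_32_BIT_REG_RTN_DATANUM - 2 values fit into it; the "+ 2" in the
 * cmd_num computation below accounts for that (the 64-bit variant below
 * similarly loses one u64 and uses "+ 1"). This reading is inferred from
 * the parsing loop rather than a documented descriptor layout.
 */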
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

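/* The register dump is laid out in 16-byte lines: the direct-mapped cmdq,
 * common, per-ring and per-vector interrupt registers come first, each
 * group padded to a line boundary with SEPARATOR_VALUE (0xFFFFFFFF)
 * markers, followed by the 32-bit and then 64-bit register blocks fetched
 * from firmware. hclge_get_regs_len() above sizes the buffer accordingly.
 */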
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
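/* Report the MAC's supported and advertised link-mode bitmaps in the
 * ethtool bitmap layout (__ETHTOOL_LINK_MODE_MASK_NBITS bits).
 */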
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
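/* Enable or disable hardware GRO for this device */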
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
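/* Ops exported to the hnae3 framework: the entry points through which the
 * hns3 netdev layer drives this PF (hclge) back end.
 */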
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
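/* Module entry points simply (un)register this algo with the hnae3
 * framework; the hnae3 core matches registered ae devices against
 * ae_algo_pci_tbl and invokes the ops above.
 */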
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);