// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
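
/* hclge_mac_update_stats_defective: query MAC statistics with the fixed
 * 21-descriptor command (opcode 0x0032) and accumulate the returned
 * counters into hdev->hw_stats.mac_stats. Used when the firmware does
 * not support reporting the statistics register number first.
 */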
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
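
/* hclge_mac_update_stats_complete: query all MAC statistics with opcode
 * 0x0034, using the descriptor count previously reported by the firmware,
 * then accumulate the counters into hdev->hw_stats.mac_stats.
 */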
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
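
/* hclge_mac_query_reg_num: ask the firmware how many MAC statistics
 * registers it reports, and convert that count into the number of
 * descriptors needed to fetch them (the first descriptor carries three
 * results, each following descriptor carries four).
 */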
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
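
/* hclge_mac_update_stats: prefer the complete statistics command when the
 * firmware supports it, and fall back to the defective (fixed-size) one
 * when the register-number query returns -EOPNOTSUPP.
 */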
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
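
/* hclge_tqps_update_stats: read the per-queue RX and TX packet counters
 * from the IGU and accumulate them into each TQP's software statistics.
 */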
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* one TX and one RX counter per queue pair */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
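
/* hclge_query_function_status: poll the firmware (up to five times) until
 * PF reset is done, then record whether this PF is the main PF.
 */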
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
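
/* hclge_query_pf_resource: read the TQP number, packet buffer sizes and
 * MSI-X vector resources assigned to this PF by the firmware.
 */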
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	/* speed codes match the encoding used by
	 * hclge_cfg_mac_speed_dup_hw() below
	 */
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
}
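
/* hclge_parse_cfg: extract the static configuration (vmdq/tc/queue sizes,
 * MAC address, media type, default speed, RSS size, speed ability and UMV
 * space) from the two config-parameter descriptors.
 */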
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
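
/* hclge_configure: fetch the static configuration from flash and use it to
 * initialize the hclge_dev fields (queues, buffers, MAC, TC/PFC setup).
 */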
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev))
		hdev->fd_en = true;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently does not support non-contiguous tc */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return 0;
}
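
/* hclge_config_tso: program the minimum and maximum TSO MSS values into
 * the hardware via the generic TSO config command.
 */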
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);

		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
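
/* hclge_is_rx_buf_ok: check whether the remaining rx buffer can hold the
 * required shared buffer on top of the private buffers; if so, fill in
 * the shared buffer size and the per-TC high/low thresholds.
 */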
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = pfc_enable_num * aligned_mps +
			(tc_num - pfc_enable_num) * aligned_mps / 2 +
			aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low =
			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
		}
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
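
/* hclge_rx_buf_calc_all: compute private rx buffer sizes and waterlines
 * for every enabled TC, using larger (max) or smaller watermarks, and
 * check that the result still leaves room for the shared buffer.
 */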
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
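
/* hclge_init_msi: allocate MSI/MSI-X vectors for the PF and set up the
 * vector bookkeeping arrays; NIC vectors precede RoCE vectors.
 */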
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;

	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}
2254 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2256 struct hclge_sfp_speed_cmd *resp = NULL;
2257 struct hclge_desc desc;
2260 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2261 resp = (struct hclge_sfp_speed_cmd *)desc.data;
2262 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2263 if (ret == -EOPNOTSUPP) {
2264 dev_warn(&hdev->pdev->dev,
2265 "IMP do not support get SFP speed %d\n", ret);
2268 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2272 *speed = resp->sfp_speed;
2277 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2279 struct hclge_mac mac = hdev->hw.mac;
2283 /* get the speed from SFP cmd when phy
2289 /* if IMP does not support getting SFP/qSFP speed, return directly */
2290 if (!hdev->support_sfp_query)
2293 ret = hclge_get_sfp_speed(hdev, &speed);
2294 if (ret == -EOPNOTSUPP) {
2295 hdev->support_sfp_query = false;
2301 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2302 return 0; /* do nothing if no SFP */
2304 /* must config full duplex for SFP */
2305 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
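/* Summary of the query flow above: the SFP speed is asked from firmware
 * once per service-task pass; -EOPNOTSUPP permanently disables further
 * queries via support_sfp_query, HCLGE_MAC_SPEED_UNKNOWN means no module
 * is present, and any reported speed is always programmed together with
 * full duplex.
 */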
2308 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2310 struct hclge_vport *vport = hclge_get_vport(handle);
2311 struct hclge_dev *hdev = vport->back;
2313 return hclge_update_speed_duplex(hdev);
2316 static int hclge_get_status(struct hnae3_handle *handle)
2318 struct hclge_vport *vport = hclge_get_vport(handle);
2319 struct hclge_dev *hdev = vport->back;
2321 hclge_update_link_status(hdev);
2323 return hdev->hw.mac.link;
2326 static void hclge_service_timer(struct timer_list *t)
2328 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2330 mod_timer(&hdev->service_timer, jiffies + HZ);
2331 hdev->hw_stats.stats_timer++;
2332 hclge_task_schedule(hdev);
2335 static void hclge_service_complete(struct hclge_dev *hdev)
2337 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2339 /* Flush memory before next watchdog */
2340 smp_mb__before_atomic();
2341 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
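/* The smp_mb__before_atomic() above pairs with the test_and_set_bit() in
 * hclge_task_schedule(): the work done by this pass must be visible to
 * other CPUs before the SERVICE_SCHED bit is cleared and a new pass can
 * be queued.
 */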
2344 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2346 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2348 /* fetch the events from their corresponding regs */
2349 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2350 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2351 msix_src_reg = hclge_read_dev(&hdev->hw,
2352 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2354 /* Assumption: if by any chance reset and mailbox events are reported
2355  * together, then we will only process the reset event in this pass and
2356  * defer the processing of the mailbox events. Since we have not cleared
2357  * the RX CMDQ event this time, we will receive another interrupt from
2358  * H/W just for the mailbox.
2359  */
2361 /* check for vector0 reset event sources */
2362 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2363 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2364 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2365 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2366 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2367 return HCLGE_VECTOR0_EVENT_RST;
2370 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2371 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2372 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2373 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2374 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2375 return HCLGE_VECTOR0_EVENT_RST;
2378 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2379 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2380 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2381 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2382 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2383 return HCLGE_VECTOR0_EVENT_RST;
2386 /* check for vector0 msix event source */
2387 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2388 return HCLGE_VECTOR0_EVENT_ERR;
2390 /* check for vector0 mailbox(=CMDQ RX) event source */
2391 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2392 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2393 *clearval = cmdq_src_reg;
2394 return HCLGE_VECTOR0_EVENT_MBX;
2397 return HCLGE_VECTOR0_EVENT_OTHER;
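/* Event priority implemented above, highest first: IMP/global/core reset,
 * then MSI-X (hardware error), then mailbox, then "other". Reporting only
 * the highest-priority event per interrupt is what lets the handler defer
 * mailbox work, as described in the comment at the top of this function.
 */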
2400 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2403 switch (event_type) {
2404 case HCLGE_VECTOR0_EVENT_RST:
2405 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2407 case HCLGE_VECTOR0_EVENT_MBX:
2408 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2415 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2417 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2418 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2419 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2420 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2421 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2424 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2426 writel(enable ? 1 : 0, vector->addr);
2429 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2431 struct hclge_dev *hdev = data;
2435 hclge_enable_vector(&hdev->misc_vector, false);
2436 event_cause = hclge_check_event_cause(hdev, &clearval);
2438 /* vector 0 interrupt is shared with reset and mailbox source events. */
2439 switch (event_cause) {
2440 case HCLGE_VECTOR0_EVENT_ERR:
2441 /* we do not know what type of reset is required now. This could
2442  * only be decided after we fetch the type of errors which
2443  * caused this event. Therefore, we will do the below for now:
2444  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2445  *    have deferred the choice of reset type to be used.
2446  * 2. Schedule the reset service task.
2447  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
2448  *    will fetch the correct type of reset. This is done
2449  *    by first decoding the types of errors.
2450  */
2451 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2453 case HCLGE_VECTOR0_EVENT_RST:
2454 hclge_reset_task_schedule(hdev);
2456 case HCLGE_VECTOR0_EVENT_MBX:
2457 /* If we are here then,
2458  * 1. Either we are not handling any mbx task and we are not
2459  *    scheduled as well
2460  *                        OR
2461  * 2. We could be handling a mbx task but nothing more is
2462  *    scheduled.
2463  * In both cases, we should schedule an mbx task as there are more
2464  * mbx messages reported by this interrupt.
2465  */
2466 hclge_mbx_task_schedule(hdev);
2469 dev_warn(&hdev->pdev->dev,
2470 "received unknown or unhandled event of vector0\n");
2474 /* clear the source of interrupt if it is not caused by reset */
2475 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2476 hclge_clear_event_cause(hdev, event_cause, clearval);
2477 hclge_enable_vector(&hdev->misc_vector, true);
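/* Note that the misc vector is re-enabled here only in the mailbox case.
 * For reset events it is re-enabled in hclge_clear_reset_cause(), and for
 * the deferred-error (UNKNOWN reset) case in hclge_get_reset_level(),
 * once the cause has actually been cleared.
 */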
2483 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2485 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2486 dev_warn(&hdev->pdev->dev,
2487 "vector(vector_id %d) has been freed.\n", vector_id);
2491 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2492 hdev->num_msi_left += 1;
2493 hdev->num_msi_used -= 1;
2496 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2498 struct hclge_misc_vector *vector = &hdev->misc_vector;
2500 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2502 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2503 hdev->vector_status[0] = 0;
2505 hdev->num_msi_left -= 1;
2506 hdev->num_msi_used += 1;
2509 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2513 hclge_get_misc_vector(hdev);
2515 /* this would be explicitly freed in the end */
2516 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2517 0, "hclge_misc", hdev);
2519 hclge_free_vector(hdev, 0);
2520 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2521 hdev->misc_vector.vector_irq);
2527 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2529 free_irq(hdev->misc_vector.vector_irq, hdev);
2530 hclge_free_vector(hdev, 0);
2533 int hclge_notify_client(struct hclge_dev *hdev,
2534 enum hnae3_reset_notify_type type)
2536 struct hnae3_client *client = hdev->nic_client;
2539 if (!client->ops->reset_notify)
2542 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2543 struct hnae3_handle *handle = &hdev->vport[i].nic;
2546 ret = client->ops->reset_notify(handle, type);
2548 dev_err(&hdev->pdev->dev,
2549 "notify nic client failed %d(%d)\n", type, ret);
2557 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2558 enum hnae3_reset_notify_type type)
2560 struct hnae3_client *client = hdev->roce_client;
2567 if (!client->ops->reset_notify)
2570 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2571 struct hnae3_handle *handle = &hdev->vport[i].roce;
2573 ret = client->ops->reset_notify(handle, type);
2575 dev_err(&hdev->pdev->dev,
2576 "notify roce client failed %d(%d)",
2585 static int hclge_reset_wait(struct hclge_dev *hdev)
2587 #define HCLGE_RESET_WAIT_MS 100
2588 #define HCLGE_RESET_WAIT_CNT 200
2589 u32 val, reg, reg_bit;
2592 switch (hdev->reset_type) {
2593 case HNAE3_IMP_RESET:
2594 reg = HCLGE_GLOBAL_RESET_REG;
2595 reg_bit = HCLGE_IMP_RESET_BIT;
2597 case HNAE3_GLOBAL_RESET:
2598 reg = HCLGE_GLOBAL_RESET_REG;
2599 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2601 case HNAE3_CORE_RESET:
2602 reg = HCLGE_GLOBAL_RESET_REG;
2603 reg_bit = HCLGE_CORE_RESET_BIT;
2605 case HNAE3_FUNC_RESET:
2606 reg = HCLGE_FUN_RST_ING;
2607 reg_bit = HCLGE_FUN_RST_ING_B;
2609 case HNAE3_FLR_RESET:
2612 dev_err(&hdev->pdev->dev,
2613 "Wait for unsupported reset type: %d\n",
2618 if (hdev->reset_type == HNAE3_FLR_RESET) {
2619 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2620 cnt++ < HCLGE_RESET_WAIT_CNT)
2621 msleep(HCLGE_RESET_WAIT_MS);
2623 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2624 dev_err(&hdev->pdev->dev,
2625 "flr wait timeout: %d\n", cnt);
2632 val = hclge_read_dev(&hdev->hw, reg);
2633 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2634 msleep(HCLGE_RESET_WAIT_MS);
2635 val = hclge_read_dev(&hdev->hw, reg);
2639 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2640 dev_warn(&hdev->pdev->dev,
2641 "Wait for reset timeout: %d\n", hdev->reset_type);
2648 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2650 struct hclge_vf_rst_cmd *req;
2651 struct hclge_desc desc;
2653 req = (struct hclge_vf_rst_cmd *)desc.data;
2654 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2655 req->dest_vfid = func_id;
2660 return hclge_cmd_send(&hdev->hw, &desc, 1);
2663 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2667 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2668 struct hclge_vport *vport = &hdev->vport[i];
2671 /* Send cmd to set/clear VF's FUNC_RST_ING */
2672 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2674 dev_err(&hdev->pdev->dev,
2675 "set vf(%d) rst failed %d!\n",
2676 vport->vport_id, ret);
2680 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2683 /* Inform VF to process the reset.
2684  * hclge_inform_reset_assert_to_vf may fail if the VF
2685  * driver is not loaded.
2686  */
2687 ret = hclge_inform_reset_assert_to_vf(vport);
2689 dev_warn(&hdev->pdev->dev,
2690 "inform reset to vf(%d) failed %d!\n",
2691 vport->vport_id, ret);
2697 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2699 struct hclge_desc desc;
2700 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2704 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2705 req->fun_reset_vfid = func_id;
2707 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2709 dev_err(&hdev->pdev->dev,
2710 "send function reset cmd fail, status =%d\n", ret);
2715 static void hclge_do_reset(struct hclge_dev *hdev)
2717 struct pci_dev *pdev = hdev->pdev;
2720 switch (hdev->reset_type) {
2721 case HNAE3_GLOBAL_RESET:
2722 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2723 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2724 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2725 dev_info(&pdev->dev, "Global Reset requested\n");
2727 case HNAE3_CORE_RESET:
2728 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2729 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2730 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2731 dev_info(&pdev->dev, "Core Reset requested\n");
2733 case HNAE3_FUNC_RESET:
2734 dev_info(&pdev->dev, "PF Reset requested\n");
2735 /* schedule again to check later */
2736 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2737 hclge_reset_task_schedule(hdev);
2739 case HNAE3_FLR_RESET:
2740 dev_info(&pdev->dev, "FLR requested\n");
2741 /* schedule again to check later */
2742 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2743 hclge_reset_task_schedule(hdev);
2746 dev_warn(&pdev->dev,
2747 "Unsupported reset type: %d\n", hdev->reset_type);
2752 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2753 unsigned long *addr)
2755 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2757 /* first, resolve any unknown reset type to the known type(s) */
2758 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2759 /* we will intentionally ignore any errors from this function
2760  * as we will end up in *some* reset request in any case
2761  */
2762 hclge_handle_hw_msix_error(hdev, addr);
2763 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2764 /* We deferred the clearing of the error event which caused the
2765  * interrupt since it was not possible to do that in
2766  * interrupt context (and this is the reason we introduced the
2767  * new UNKNOWN reset type). Now that the errors have been
2768  * handled and cleared in hardware, we can safely enable
2769  * interrupts. This is an exception to the norm.
2770  */
2771 hclge_enable_vector(&hdev->misc_vector, true);
2774 /* return the highest priority reset level amongst all */
2775 if (test_bit(HNAE3_IMP_RESET, addr)) {
2776 rst_level = HNAE3_IMP_RESET;
2777 clear_bit(HNAE3_IMP_RESET, addr);
2778 clear_bit(HNAE3_GLOBAL_RESET, addr);
2779 clear_bit(HNAE3_CORE_RESET, addr);
2780 clear_bit(HNAE3_FUNC_RESET, addr);
2781 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2782 rst_level = HNAE3_GLOBAL_RESET;
2783 clear_bit(HNAE3_GLOBAL_RESET, addr);
2784 clear_bit(HNAE3_CORE_RESET, addr);
2785 clear_bit(HNAE3_FUNC_RESET, addr);
2786 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2787 rst_level = HNAE3_CORE_RESET;
2788 clear_bit(HNAE3_CORE_RESET, addr);
2789 clear_bit(HNAE3_FUNC_RESET, addr);
2790 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2791 rst_level = HNAE3_FUNC_RESET;
2792 clear_bit(HNAE3_FUNC_RESET, addr);
2793 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2794 rst_level = HNAE3_FLR_RESET;
2795 clear_bit(HNAE3_FLR_RESET, addr);
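/* Worked example of the precedence above: if both HNAE3_GLOBAL_RESET and
 * HNAE3_FUNC_RESET are pending in *addr, the function returns
 * HNAE3_GLOBAL_RESET and clears both bits, because a global reset also
 * covers everything a function reset would do.
 */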
2801 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2805 switch (hdev->reset_type) {
2806 case HNAE3_IMP_RESET:
2807 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2809 case HNAE3_GLOBAL_RESET:
2810 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2812 case HNAE3_CORE_RESET:
2813 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2822 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2823 hclge_enable_vector(&hdev->misc_vector, true);
2826 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2830 switch (hdev->reset_type) {
2831 case HNAE3_FUNC_RESET:
2833 case HNAE3_FLR_RESET:
2834 ret = hclge_set_all_vf_rst(hdev, true);
2843 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2848 switch (hdev->reset_type) {
2849 case HNAE3_FUNC_RESET:
2850 /* There is no mechanism for PF to know if VF has stopped IO
2851  * for now, so just wait 100 ms for VF to stop IO
2852  */
2853 msleep(100);
2854 ret = hclge_func_reset_cmd(hdev, 0);
2856 dev_err(&hdev->pdev->dev,
2857 "asserting function reset fail %d!\n", ret);
2861 /* After performing PF reset, it is not necessary to do the
2862  * mailbox handling or send any command to firmware, because
2863  * any mailbox handling or command to firmware is only valid
2864  * after hclge_cmd_init is called.
2865  */
2866 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2868 case HNAE3_FLR_RESET:
2869 /* There is no mechanism for PF to know if VF has stopped IO
2870  * for now, so just wait 100 ms for VF to stop IO
2871  */
2872 msleep(100);
2873 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2874 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2876 case HNAE3_IMP_RESET:
2877 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2878 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2879 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2885 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2890 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2892 #define MAX_RESET_FAIL_CNT 5
2893 #define RESET_UPGRADE_DELAY_SEC 10
2895 if (hdev->reset_pending) {
2896 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2897 hdev->reset_pending);
2899 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2900 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2901 BIT(HCLGE_IMP_RESET_BIT))) {
2902 dev_info(&hdev->pdev->dev,
2903 "reset failed because IMP Reset is pending\n");
2904 hclge_clear_reset_cause(hdev);
2906 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2907 hdev->reset_fail_cnt++;
2909 set_bit(hdev->reset_type, &hdev->reset_pending);
2910 dev_info(&hdev->pdev->dev,
2911 "re-schedule to wait for hw reset done\n");
2915 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2916 hclge_clear_reset_cause(hdev);
2917 mod_timer(&hdev->reset_timer,
2918 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2923 hclge_clear_reset_cause(hdev);
2924 dev_err(&hdev->pdev->dev, "Reset fail!\n");
2928 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2932 switch (hdev->reset_type) {
2933 case HNAE3_FUNC_RESET:
2935 case HNAE3_FLR_RESET:
2936 ret = hclge_set_all_vf_rst(hdev, false);
2945 static void hclge_reset(struct hclge_dev *hdev)
2947 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2948 bool is_timeout = false;
2951 /* Initialize ae_dev reset status as well, in case the enet layer wants
2952  * to know if the device is undergoing reset
2953  */
2954 ae_dev->reset_type = hdev->reset_type;
2955 hdev->reset_count++;
2956 /* perform reset of the stack & ae device for a client */
2957 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2961 ret = hclge_reset_prepare_down(hdev);
2966 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2968 goto err_reset_lock;
2972 ret = hclge_reset_prepare_wait(hdev);
2976 if (hclge_reset_wait(hdev)) {
2981 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2986 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2988 goto err_reset_lock;
2990 ret = hclge_reset_ae_dev(hdev->ae_dev);
2992 goto err_reset_lock;
2994 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2996 goto err_reset_lock;
2998 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3000 goto err_reset_lock;
3002 hclge_clear_reset_cause(hdev);
3004 ret = hclge_reset_prepare_up(hdev);
3006 goto err_reset_lock;
3008 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3010 goto err_reset_lock;
3014 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3018 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3022 hdev->last_reset_time = jiffies;
3023 hdev->reset_fail_cnt = 0;
3024 ae_dev->reset_type = HNAE3_NONE_RESET;
3025 del_timer(&hdev->reset_timer);
3032 if (hclge_reset_err_handle(hdev, is_timeout))
3033 hclge_reset_task_schedule(hdev);
3036 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3038 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3039 struct hclge_dev *hdev = ae_dev->priv;
3041 /* We might end up getting called broadly because of the 2 cases below:
3042  * 1. A recoverable error was conveyed through APEI and the only way
3043  *    to bring back normalcy is to reset.
3044  * 2. A new reset request from the stack due to timeout
3045  *
3046  * For the first case, the error event might not have an ae handle
3047  * available. Check if this is a new reset request and we are not here
3048  * just because the last reset attempt did not succeed and the watchdog
3049  * hit us again. We know this if the last reset request did not occur
3050  * very recently (watchdog timer = 5*HZ; let us check after a
3051  * sufficiently large time, say 4*5*HZ). In case of a new request we
3052  * reset the "reset level" to PF reset. And if it is a repeat reset
3053  * request of the most recent one, then we want to make sure we
3054  * throttle the reset request. Therefore, we will not allow it again
3055  * before 3*HZ has elapsed.
3056  */
3057 handle = &hdev->vport[0].nic;
3059 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3061 else if (hdev->default_reset_request)
3063 hclge_get_reset_level(hdev,
3064 &hdev->default_reset_request);
3065 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3066 hdev->reset_level = HNAE3_FUNC_RESET;
3068 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3071 /* request reset & schedule reset task */
3072 set_bit(hdev->reset_level, &hdev->reset_request);
3073 hclge_reset_task_schedule(hdev);
3075 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3076 hdev->reset_level++;
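/* Example of the escalation above: a first timeout-driven event requests
 * HNAE3_FUNC_RESET; if events keep arriving, reset_level steps up one
 * enum level per event until it reaches HNAE3_GLOBAL_RESET, where it
 * stops escalating.
 */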
3079 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3080 enum hnae3_reset_type rst_type)
3082 struct hclge_dev *hdev = ae_dev->priv;
3084 set_bit(rst_type, &hdev->default_reset_request);
3087 static void hclge_reset_timer(struct timer_list *t)
3089 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3091 dev_info(&hdev->pdev->dev,
3092 "triggering global reset in reset timer\n");
3093 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3094 hclge_reset_event(hdev->pdev, NULL);
3097 static void hclge_reset_subtask(struct hclge_dev *hdev)
3099 /* check if there is any ongoing reset in the hardware. This status can
3100  * be checked from reset_pending. If there is, then we need to wait for
3101  * hardware to complete the reset.
3102  * a. If we are able to figure out in reasonable time that hardware
3103  *    has fully reset, then we can proceed with the driver and client
3104  *    reset.
3105  * b. else, we can come back later to check this status, so re-schedule
3106  *    now.
3107  */
3108 hdev->last_reset_time = jiffies;
3109 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3110 if (hdev->reset_type != HNAE3_NONE_RESET)
3113 /* check if we got any *new* reset requests to be honored */
3114 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3115 if (hdev->reset_type != HNAE3_NONE_RESET)
3116 hclge_do_reset(hdev);
3118 hdev->reset_type = HNAE3_NONE_RESET;
3121 static void hclge_reset_service_task(struct work_struct *work)
3123 struct hclge_dev *hdev =
3124 container_of(work, struct hclge_dev, rst_service_task);
3126 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3129 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3131 hclge_reset_subtask(hdev);
3133 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3136 static void hclge_mailbox_service_task(struct work_struct *work)
3138 struct hclge_dev *hdev =
3139 container_of(work, struct hclge_dev, mbx_service_task);
3141 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3144 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3146 hclge_mbx_handler(hdev);
3148 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3151 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3155 /* start from vport 1, since the PF (vport 0) is always alive */
3156 for (i = 1; i < hdev->num_alloc_vport; i++) {
3157 struct hclge_vport *vport = &hdev->vport[i];
3159 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3160 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3162 /* If vf is not alive, set to default value */
3163 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3164 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3168 static void hclge_service_task(struct work_struct *work)
3170 struct hclge_dev *hdev =
3171 container_of(work, struct hclge_dev, service_task);
3173 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3174 hclge_update_stats_for_all(hdev);
3175 hdev->hw_stats.stats_timer = 0;
3178 hclge_update_speed_duplex(hdev);
3179 hclge_update_link_status(hdev);
3180 hclge_update_vport_alive(hdev);
3181 hclge_service_complete(hdev);
3184 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3186 /* VF handle has no client */
3187 if (!handle->client)
3188 return container_of(handle, struct hclge_vport, nic);
3189 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3190 return container_of(handle, struct hclge_vport, roce);
3192 return container_of(handle, struct hclge_vport, nic);
3195 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3196 struct hnae3_vector_info *vector_info)
3198 struct hclge_vport *vport = hclge_get_vport(handle);
3199 struct hnae3_vector_info *vector = vector_info;
3200 struct hclge_dev *hdev = vport->back;
3204 vector_num = min(hdev->num_msi_left, vector_num);
3206 for (j = 0; j < vector_num; j++) {
3207 for (i = 1; i < hdev->num_msi; i++) {
3208 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3209 vector->vector = pci_irq_vector(hdev->pdev, i);
3210 vector->io_addr = hdev->hw.io_base +
3211 HCLGE_VECTOR_REG_BASE +
3212 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3214 HCLGE_VECTOR_VF_OFFSET;
3215 hdev->vector_status[i] = vport->vport_id;
3216 hdev->vector_irq[i] = vector->vector;
3225 hdev->num_msi_left -= alloc;
3226 hdev->num_msi_used += alloc;
3231 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3235 for (i = 0; i < hdev->num_msi; i++)
3236 if (vector == hdev->vector_irq[i])
3242 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3244 struct hclge_vport *vport = hclge_get_vport(handle);
3245 struct hclge_dev *hdev = vport->back;
3248 vector_id = hclge_get_vector_index(hdev, vector);
3249 if (vector_id < 0) {
3250 dev_err(&hdev->pdev->dev,
3251 "Get vector index fail. vector_id =%d\n", vector_id);
3255 hclge_free_vector(hdev, vector_id);
3260 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3262 return HCLGE_RSS_KEY_SIZE;
3265 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3267 return HCLGE_RSS_IND_TBL_SIZE;
3270 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3271 const u8 hfunc, const u8 *key)
3273 struct hclge_rss_config_cmd *req;
3274 struct hclge_desc desc;
3279 req = (struct hclge_rss_config_cmd *)desc.data;
3281 for (key_offset = 0; key_offset < 3; key_offset++) {
3282 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3285 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3286 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3288 if (key_offset == 2)
3290 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3292 key_size = HCLGE_RSS_HASH_KEY_NUM;
3294 memcpy(req->hash_key,
3295 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3297 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3299 dev_err(&hdev->pdev->dev,
3300 "Configure RSS config fail, status = %d\n",
3308 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3310 struct hclge_rss_indirection_table_cmd *req;
3311 struct hclge_desc desc;
3315 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3317 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3318 hclge_cmd_setup_basic_desc
3319 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3321 req->start_table_index =
3322 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3323 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3325 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3326 req->rss_result[j] =
3327 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3329 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3331 dev_err(&hdev->pdev->dev,
3332 "Configure rss indir table fail,status = %d\n",
3340 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3341 u16 *tc_size, u16 *tc_offset)
3343 struct hclge_rss_tc_mode_cmd *req;
3344 struct hclge_desc desc;
3348 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3349 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3351 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3354 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3355 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3356 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3357 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3358 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3360 req->rss_tc_mode[i] = cpu_to_le16(mode);
3363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3365 dev_err(&hdev->pdev->dev,
3366 "Configure rss tc mode fail, status = %d\n", ret);
3371 static void hclge_get_rss_type(struct hclge_vport *vport)
3373 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3374 vport->rss_tuple_sets.ipv4_udp_en ||
3375 vport->rss_tuple_sets.ipv4_sctp_en ||
3376 vport->rss_tuple_sets.ipv6_tcp_en ||
3377 vport->rss_tuple_sets.ipv6_udp_en ||
3378 vport->rss_tuple_sets.ipv6_sctp_en)
3379 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3380 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3381 vport->rss_tuple_sets.ipv6_fragment_en)
3382 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3384 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3387 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3389 struct hclge_rss_input_tuple_cmd *req;
3390 struct hclge_desc desc;
3393 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3395 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3397 /* Get the tuple cfg from pf */
3398 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3399 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3400 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3401 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3402 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3403 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3404 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3405 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3406 hclge_get_rss_type(&hdev->vport[0]);
3407 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3409 dev_err(&hdev->pdev->dev,
3410 "Configure rss input fail, status = %d\n", ret);
3414 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3417 struct hclge_vport *vport = hclge_get_vport(handle);
3420 /* Get hash algorithm */
3422 switch (vport->rss_algo) {
3423 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3424 *hfunc = ETH_RSS_HASH_TOP;
3426 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3427 *hfunc = ETH_RSS_HASH_XOR;
3430 *hfunc = ETH_RSS_HASH_UNKNOWN;
3435 /* Get the RSS Key required by the user */
3437 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3439 /* Get indirect table */
3441 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3442 indir[i] = vport->rss_indirection_tbl[i];
3447 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3448 const u8 *key, const u8 hfunc)
3450 struct hclge_vport *vport = hclge_get_vport(handle);
3451 struct hclge_dev *hdev = vport->back;
3455 /* Set the RSS Hash Key if specified by the user */
3458 case ETH_RSS_HASH_TOP:
3459 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3461 case ETH_RSS_HASH_XOR:
3462 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3464 case ETH_RSS_HASH_NO_CHANGE:
3465 hash_algo = vport->rss_algo;
3471 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3475 /* Update the shadow RSS key with the user-specified key */
3476 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3477 vport->rss_algo = hash_algo;
3480 /* Update the shadow RSS table with user specified qids */
3481 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3482 vport->rss_indirection_tbl[i] = indir[i];
3484 /* Update the hardware */
3485 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3488 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3490 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3492 if (nfc->data & RXH_L4_B_2_3)
3493 hash_sets |= HCLGE_D_PORT_BIT;
3495 hash_sets &= ~HCLGE_D_PORT_BIT;
3497 if (nfc->data & RXH_IP_SRC)
3498 hash_sets |= HCLGE_S_IP_BIT;
3500 hash_sets &= ~HCLGE_S_IP_BIT;
3502 if (nfc->data & RXH_IP_DST)
3503 hash_sets |= HCLGE_D_IP_BIT;
3505 hash_sets &= ~HCLGE_D_IP_BIT;
3507 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3508 hash_sets |= HCLGE_V_TAG_BIT;
3513 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3514 struct ethtool_rxnfc *nfc)
3516 struct hclge_vport *vport = hclge_get_vport(handle);
3517 struct hclge_dev *hdev = vport->back;
3518 struct hclge_rss_input_tuple_cmd *req;
3519 struct hclge_desc desc;
3523 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3524 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3527 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3528 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3530 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3531 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3532 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3533 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3534 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3535 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3536 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3537 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3539 tuple_sets = hclge_get_rss_hash_bits(nfc);
3540 switch (nfc->flow_type) {
3542 req->ipv4_tcp_en = tuple_sets;
3545 req->ipv6_tcp_en = tuple_sets;
3548 req->ipv4_udp_en = tuple_sets;
3551 req->ipv6_udp_en = tuple_sets;
3554 req->ipv4_sctp_en = tuple_sets;
3557 if ((nfc->data & RXH_L4_B_0_1) ||
3558 (nfc->data & RXH_L4_B_2_3))
3561 req->ipv6_sctp_en = tuple_sets;
3564 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3567 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3573 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3575 dev_err(&hdev->pdev->dev,
3576 "Set rss tuple fail, status = %d\n", ret);
3580 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3581 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3582 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3583 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3584 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3585 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3586 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3587 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3588 hclge_get_rss_type(vport);
3592 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3593 struct ethtool_rxnfc *nfc)
3595 struct hclge_vport *vport = hclge_get_vport(handle);
3600 switch (nfc->flow_type) {
3602 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3605 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3608 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3611 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3614 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3617 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3621 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3630 if (tuple_sets & HCLGE_D_PORT_BIT)
3631 nfc->data |= RXH_L4_B_2_3;
3632 if (tuple_sets & HCLGE_S_PORT_BIT)
3633 nfc->data |= RXH_L4_B_0_1;
3634 if (tuple_sets & HCLGE_D_IP_BIT)
3635 nfc->data |= RXH_IP_DST;
3636 if (tuple_sets & HCLGE_S_IP_BIT)
3637 nfc->data |= RXH_IP_SRC;
3642 static int hclge_get_tc_size(struct hnae3_handle *handle)
3644 struct hclge_vport *vport = hclge_get_vport(handle);
3645 struct hclge_dev *hdev = vport->back;
3647 return hdev->rss_size_max;
3650 int hclge_rss_init_hw(struct hclge_dev *hdev)
3652 struct hclge_vport *vport = hdev->vport;
3653 u8 *rss_indir = vport[0].rss_indirection_tbl;
3654 u16 rss_size = vport[0].alloc_rss_size;
3655 u8 *key = vport[0].rss_hash_key;
3656 u8 hfunc = vport[0].rss_algo;
3657 u16 tc_offset[HCLGE_MAX_TC_NUM];
3658 u16 tc_valid[HCLGE_MAX_TC_NUM];
3659 u16 tc_size[HCLGE_MAX_TC_NUM];
3663 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3667 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3671 ret = hclge_set_rss_input_tuple(hdev);
3675 /* Each TC has the same queue size, and the tc_size set to hardware is
3676  * the log2 of rss_size rounded up to a power of two; the actual queue
3677  * size is limited by the indirection table.
3678  */
3679 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3680 dev_err(&hdev->pdev->dev,
3681 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3686 roundup_size = roundup_pow_of_two(rss_size);
3687 roundup_size = ilog2(roundup_size);
3689 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3692 if (!(hdev->hw_tc_map & BIT(i)))
3696 tc_size[i] = roundup_size;
3697 tc_offset[i] = rss_size * i;
3700 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
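/* Worked example for the TC mode above: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size[i] = 5 tells
 * hardware each enabled TC spans 2^5 queues, while tc_offset[i] = 24 * i
 * keeps consecutive TCs 24 queues apart; the indirection table limits
 * which queues are actually used.
 */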
3703 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3705 struct hclge_vport *vport = hdev->vport;
3708 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3709 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3710 vport[j].rss_indirection_tbl[i] =
3711 i % vport[j].alloc_rss_size;
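/* Example of the default table built above: with alloc_rss_size = 16 the
 * indirection table becomes 0, 1, ..., 15, 0, 1, ... so hash results
 * spread round-robin across the vport's RSS queues.
 */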
3715 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3717 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3718 struct hclge_vport *vport = hdev->vport;
3720 if (hdev->pdev->revision >= 0x21)
3721 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3723 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3724 vport[i].rss_tuple_sets.ipv4_tcp_en =
3725 HCLGE_RSS_INPUT_TUPLE_OTHER;
3726 vport[i].rss_tuple_sets.ipv4_udp_en =
3727 HCLGE_RSS_INPUT_TUPLE_OTHER;
3728 vport[i].rss_tuple_sets.ipv4_sctp_en =
3729 HCLGE_RSS_INPUT_TUPLE_SCTP;
3730 vport[i].rss_tuple_sets.ipv4_fragment_en =
3731 HCLGE_RSS_INPUT_TUPLE_OTHER;
3732 vport[i].rss_tuple_sets.ipv6_tcp_en =
3733 HCLGE_RSS_INPUT_TUPLE_OTHER;
3734 vport[i].rss_tuple_sets.ipv6_udp_en =
3735 HCLGE_RSS_INPUT_TUPLE_OTHER;
3736 vport[i].rss_tuple_sets.ipv6_sctp_en =
3737 HCLGE_RSS_INPUT_TUPLE_SCTP;
3738 vport[i].rss_tuple_sets.ipv6_fragment_en =
3739 HCLGE_RSS_INPUT_TUPLE_OTHER;
3741 vport[i].rss_algo = rss_algo;
3743 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3744 HCLGE_RSS_KEY_SIZE);
3747 hclge_rss_indir_init_cfg(hdev);
3750 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3751 int vector_id, bool en,
3752 struct hnae3_ring_chain_node *ring_chain)
3754 struct hclge_dev *hdev = vport->back;
3755 struct hnae3_ring_chain_node *node;
3756 struct hclge_desc desc;
3757 struct hclge_ctrl_vector_chain_cmd *req
3758 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3759 enum hclge_cmd_status status;
3760 enum hclge_opcode_type op;
3761 u16 tqp_type_and_id;
3764 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3765 hclge_cmd_setup_basic_desc(&desc, op, false);
3766 req->int_vector_id = vector_id;
3769 for (node = ring_chain; node; node = node->next) {
3770 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3771 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3773 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3774 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3775 HCLGE_TQP_ID_S, node->tqp_index);
3776 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3778 hnae3_get_field(node->int_gl_idx,
3779 HNAE3_RING_GL_IDX_M,
3780 HNAE3_RING_GL_IDX_S));
3781 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3782 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3783 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3784 req->vfid = vport->vport_id;
3786 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3788 dev_err(&hdev->pdev->dev,
3789 "Map TQP fail, status is %d.\n",
3795 hclge_cmd_setup_basic_desc(&desc,
3798 req->int_vector_id = vector_id;
3803 req->int_cause_num = i;
3804 req->vfid = vport->vport_id;
3805 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3807 dev_err(&hdev->pdev->dev,
3808 "Map TQP fail, status is %d.\n", status);
3816 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3818 struct hnae3_ring_chain_node *ring_chain)
3820 struct hclge_vport *vport = hclge_get_vport(handle);
3821 struct hclge_dev *hdev = vport->back;
3824 vector_id = hclge_get_vector_index(hdev, vector);
3825 if (vector_id < 0) {
3826 dev_err(&hdev->pdev->dev,
3827 "Get vector index fail. vector_id =%d\n", vector_id);
3831 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3834 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3836 struct hnae3_ring_chain_node *ring_chain)
3838 struct hclge_vport *vport = hclge_get_vport(handle);
3839 struct hclge_dev *hdev = vport->back;
3842 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3845 vector_id = hclge_get_vector_index(hdev, vector);
3846 if (vector_id < 0) {
3847 dev_err(&handle->pdev->dev,
3848 "Get vector index fail. ret =%d\n", vector_id);
3852 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3854 dev_err(&handle->pdev->dev,
3855 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3862 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3863 struct hclge_promisc_param *param)
3865 struct hclge_promisc_cfg_cmd *req;
3866 struct hclge_desc desc;
3869 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3871 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3872 req->vf_id = param->vf_id;
3874 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3875  * pdev revision(0x20); newer revisions support them. Setting
3876  * these two fields does not cause an error when the driver
3877  * sends the command to firmware on revision(0x20).
3878  */
3880 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3882 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3884 dev_err(&hdev->pdev->dev,
3885 "Set promisc mode fail, status is %d.\n", ret);
3890 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3891 bool en_mc, bool en_bc, int vport_id)
3896 memset(param, 0, sizeof(struct hclge_promisc_param));
3898 param->enable = HCLGE_PROMISC_EN_UC;
3900 param->enable |= HCLGE_PROMISC_EN_MC;
3902 param->enable |= HCLGE_PROMISC_EN_BC;
3903 param->vf_id = vport_id;
3906 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3909 struct hclge_vport *vport = hclge_get_vport(handle);
3910 struct hclge_dev *hdev = vport->back;
3911 struct hclge_promisc_param param;
3912 bool en_bc_pmc = true;
3914 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3915  * is always bypassed. So broadcast promisc should be disabled until
3916  * the user enables promisc mode.
3917  */
3918 if (handle->pdev->revision == 0x20)
3919 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3921 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3923 return hclge_cmd_set_promisc_mode(hdev, ¶m);
3926 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3928 struct hclge_get_fd_mode_cmd *req;
3929 struct hclge_desc desc;
3932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3934 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3936 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3938 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3942 *fd_mode = req->mode;
3947 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3948 u32 *stage1_entry_num,
3949 u32 *stage2_entry_num,
3950 u16 *stage1_counter_num,
3951 u16 *stage2_counter_num)
3953 struct hclge_get_fd_allocation_cmd *req;
3954 struct hclge_desc desc;
3957 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3959 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3961 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3963 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3968 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3969 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3970 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3971 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3976 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3978 struct hclge_set_fd_key_config_cmd *req;
3979 struct hclge_fd_key_cfg *stage;
3980 struct hclge_desc desc;
3983 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3985 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3986 stage = &hdev->fd_cfg.key_cfg[stage_num];
3987 req->stage = stage_num;
3988 req->key_select = stage->key_sel;
3989 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3990 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3991 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3992 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3993 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3994 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3996 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3998 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4003 static int hclge_init_fd_config(struct hclge_dev *hdev)
4005 #define LOW_2_WORDS 0x03
4006 struct hclge_fd_key_cfg *key_cfg;
4009 if (!hnae3_dev_fd_supported(hdev))
4012 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4016 switch (hdev->fd_cfg.fd_mode) {
4017 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4018 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4020 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4021 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4024 dev_err(&hdev->pdev->dev,
4025 "Unsupported flow director mode %d\n",
4026 hdev->fd_cfg.fd_mode);
4030 hdev->fd_cfg.proto_support =
4031 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4032 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4033 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4034 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4035 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4036 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4037 key_cfg->outer_sipv6_word_en = 0;
4038 key_cfg->outer_dipv6_word_en = 0;
4040 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4041 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4042 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4043 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4045 /* If the max 400-bit key is used, we can support tuples for ether type */
4046 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4047 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4048 key_cfg->tuple_active |=
4049 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4052 /* roce_type is used to filter roce frames;
4053  * dst_vport is used to specify the rule.
4054  */
4055 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4057 ret = hclge_get_fd_allocation(hdev,
4058 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4059 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4060 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4061 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4065 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4068 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4069 int loc, u8 *key, bool is_add)
4071 struct hclge_fd_tcam_config_1_cmd *req1;
4072 struct hclge_fd_tcam_config_2_cmd *req2;
4073 struct hclge_fd_tcam_config_3_cmd *req3;
4074 struct hclge_desc desc[3];
4077 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4078 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4079 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4080 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4081 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4083 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4084 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4085 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4087 req1->stage = stage;
4088 req1->xy_sel = sel_x ? 1 : 0;
4089 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4090 req1->index = cpu_to_le32(loc);
4091 req1->entry_vld = sel_x ? is_add : 0;
4094 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4095 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4096 sizeof(req2->tcam_data));
4097 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4098 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4101 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4103 dev_err(&hdev->pdev->dev,
4104 "config tcam key fail, ret=%d\n",
4110 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4111 struct hclge_fd_ad_data *action)
4113 struct hclge_fd_ad_config_cmd *req;
4114 struct hclge_desc desc;
4118 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4120 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4121 req->index = cpu_to_le32(loc);
4124 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4125 action->write_rule_id_to_bd);
4126 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4129 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4130 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4131 action->forward_to_direct_queue);
4132 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4134 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4135 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4136 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4137 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4138 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4139 action->counter_id);
4141 req->ad_data = cpu_to_le64(ad_data);
4142 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4144 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4149 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4150 struct hclge_fd_rule *rule)
4152 u16 tmp_x_s, tmp_y_s;
4153 u32 tmp_x_l, tmp_y_l;
4156 if (rule->unused_tuple & tuple_bit)
4159 switch (tuple_bit) {
4162 case BIT(INNER_DST_MAC):
4163 for (i = 0; i < 6; i++) {
4164 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4165 rule->tuples_mask.dst_mac[i]);
4166 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4167 rule->tuples_mask.dst_mac[i]);
4171 case BIT(INNER_SRC_MAC):
4172 for (i = 0; i < 6; i++) {
4173 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4174 rule->tuples_mask.src_mac[i]);
4175 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4176 rule->tuples_mask.src_mac[i]);
4180 case BIT(INNER_VLAN_TAG_FST):
4181 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4182 rule->tuples_mask.vlan_tag1);
4183 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4184 rule->tuples_mask.vlan_tag1);
4185 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4186 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4189 case BIT(INNER_ETH_TYPE):
4190 calc_x(tmp_x_s, rule->tuples.ether_proto,
4191 rule->tuples_mask.ether_proto);
4192 calc_y(tmp_y_s, rule->tuples.ether_proto,
4193 rule->tuples_mask.ether_proto);
4194 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4195 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4198 case BIT(INNER_IP_TOS):
4199 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4200 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4203 case BIT(INNER_IP_PROTO):
4204 calc_x(*key_x, rule->tuples.ip_proto,
4205 rule->tuples_mask.ip_proto);
4206 calc_y(*key_y, rule->tuples.ip_proto,
4207 rule->tuples_mask.ip_proto);
4210 case BIT(INNER_SRC_IP):
4211 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4212 rule->tuples_mask.src_ip[3]);
4213 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4214 rule->tuples_mask.src_ip[3]);
4215 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4216 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4219 case BIT(INNER_DST_IP):
4220 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4221 rule->tuples_mask.dst_ip[3]);
4222 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4223 rule->tuples_mask.dst_ip[3]);
4224 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4225 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4228 case BIT(INNER_SRC_PORT):
4229 calc_x(tmp_x_s, rule->tuples.src_port,
4230 rule->tuples_mask.src_port);
4231 calc_y(tmp_y_s, rule->tuples.src_port,
4232 rule->tuples_mask.src_port);
4233 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4234 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4237 case BIT(INNER_DST_PORT):
4238 calc_x(tmp_x_s, rule->tuples.dst_port,
4239 rule->tuples_mask.dst_port);
4240 calc_y(tmp_y_s, rule->tuples.dst_port,
4241 rule->tuples_mask.dst_port);
4242 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4243 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
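/* The calc_x()/calc_y() helpers used above (macros defined earlier in
 * this file, not shown here) derive the two TCAM search patterns from a
 * tuple value and its mask; a masked-out bit yields an X/Y pair the TCAM
 * treats as "don't care", which is why fully unused tuples are skipped
 * via rule->unused_tuple before any conversion.
 */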
4251 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4252 u8 vf_id, u8 network_port_id)
4254 u32 port_number = 0;
4256 if (port_type == HOST_PORT) {
4257 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4259 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4261 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4263 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4264 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4265 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4271 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4272 __le32 *key_x, __le32 *key_y,
4273 struct hclge_fd_rule *rule)
4275 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4276 u8 cur_pos = 0, tuple_size, shift_bits;
4279 for (i = 0; i < MAX_META_DATA; i++) {
4280 tuple_size = meta_data_key_info[i].key_length;
4281 tuple_bit = key_cfg->meta_data_active & BIT(i);
4283 switch (tuple_bit) {
4284 case BIT(ROCE_TYPE):
4285 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4286 cur_pos += tuple_size;
4288 case BIT(DST_VPORT):
4289 port_number = hclge_get_port_number(HOST_PORT, 0,
4291 hnae3_set_field(meta_data,
4292 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4293 cur_pos, port_number);
4294 cur_pos += tuple_size;
4301 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4302 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4303 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4305 *key_x = cpu_to_le32(tmp_x << shift_bits);
4306 *key_y = cpu_to_le32(tmp_y << shift_bits);
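/* Worked example of the final shift above: meta_data is a 32-bit word
 * filled from bit 0 upward, so if cur_pos ends at 9 used bits, shift_bits
 * is 32 - 9 = 23 and the left shift parks the meta data at the MSB end of
 * the key region, matching the key layout described below.
 */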
4309 /* A complete key is combined from the meta data key and the tuple key.
4310  * The meta data key is stored in the MSB region, and the tuple key is
4311  * stored in the LSB region; unused bits are filled with 0.
4312  */
4313 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4314 struct hclge_fd_rule *rule)
4316 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4317 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4318 u8 *cur_key_x, *cur_key_y;
4319 int i, ret, tuple_size;
4320 u8 meta_data_region;
4322 memset(key_x, 0, sizeof(key_x));
4323 memset(key_y, 0, sizeof(key_y));
4327 for (i = 0; i < MAX_TUPLE; i++) {
4331 tuple_size = tuple_key_info[i].key_length / 8;
4332 check_tuple = key_cfg->tuple_active & BIT(i);
4334 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4337 cur_key_x += tuple_size;
4338 cur_key_y += tuple_size;
4342 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4343 MAX_META_DATA_LENGTH / 8;
4345 hclge_fd_convert_meta_data(key_cfg,
4346 (__le32 *)(key_x + meta_data_region),
4347 (__le32 *)(key_y + meta_data_region),
4350 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4353 dev_err(&hdev->pdev->dev,
4354 "fd key_y config fail, loc=%d, ret=%d\n",
4355 rule->queue_id, ret);
4359 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4362 dev_err(&hdev->pdev->dev,
4363 "fd key_x config fail, loc=%d, ret=%d\n",
4364 rule->queue_id, ret);
4368 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4369 struct hclge_fd_rule *rule)
4371 struct hclge_fd_ad_data ad_data;
4373 ad_data.ad_id = rule->location;
4375 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4376 ad_data.drop_packet = true;
4377 ad_data.forward_to_direct_queue = false;
4378 ad_data.queue_id = 0;
4380 ad_data.drop_packet = false;
4381 ad_data.forward_to_direct_queue = true;
4382 ad_data.queue_id = rule->queue_id;
4385 ad_data.use_counter = false;
4386 ad_data.counter_id = 0;
4388 ad_data.use_next_stage = false;
4389 ad_data.next_input_key = 0;
4391 ad_data.write_rule_id_to_bd = true;
4392 ad_data.rule_id = rule->location;
4394 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4397 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4398 struct ethtool_rx_flow_spec *fs, u32 *unused)
4400 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4401 struct ethtool_usrip4_spec *usr_ip4_spec;
4402 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4403 struct ethtool_usrip6_spec *usr_ip6_spec;
4404 struct ethhdr *ether_spec;
4406 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4409 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4412 if ((fs->flow_type & FLOW_EXT) &&
4413 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4414 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4418 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4422 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4423 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4425 if (!tcp_ip4_spec->ip4src)
4426 *unused |= BIT(INNER_SRC_IP);
4428 if (!tcp_ip4_spec->ip4dst)
4429 *unused |= BIT(INNER_DST_IP);
4431 if (!tcp_ip4_spec->psrc)
4432 *unused |= BIT(INNER_SRC_PORT);
4434 if (!tcp_ip4_spec->pdst)
4435 *unused |= BIT(INNER_DST_PORT);
4437 if (!tcp_ip4_spec->tos)
4438 *unused |= BIT(INNER_IP_TOS);
4442 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4443 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4444 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4446 if (!usr_ip4_spec->ip4src)
4447 *unused |= BIT(INNER_SRC_IP);
4449 if (!usr_ip4_spec->ip4dst)
4450 *unused |= BIT(INNER_DST_IP);
4452 if (!usr_ip4_spec->tos)
4453 *unused |= BIT(INNER_IP_TOS);
4455 if (!usr_ip4_spec->proto)
4456 *unused |= BIT(INNER_IP_PROTO);
4458 if (usr_ip4_spec->l4_4_bytes)
4461 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4468 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4469 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4472 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4473 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4474 *unused |= BIT(INNER_SRC_IP);
4476 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4477 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4478 *unused |= BIT(INNER_DST_IP);
4480 if (!tcp_ip6_spec->psrc)
4481 *unused |= BIT(INNER_SRC_PORT);
4483 if (!tcp_ip6_spec->pdst)
4484 *unused |= BIT(INNER_DST_PORT);
4486 if (tcp_ip6_spec->tclass)
4487 return -EOPNOTSUPP;
4489 break;
4490 case IPV6_USER_FLOW:
4491 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4492 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4493 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4494 BIT(INNER_DST_PORT);
4496 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4497 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4498 *unused |= BIT(INNER_SRC_IP);
4500 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4501 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4502 *unused |= BIT(INNER_DST_IP);
4504 if (!usr_ip6_spec->l4_proto)
4505 *unused |= BIT(INNER_IP_PROTO);
4507 if (usr_ip6_spec->tclass)
4508 return -EOPNOTSUPP;
4510 if (usr_ip6_spec->l4_4_bytes)
4511 return -EOPNOTSUPP;
4513 break;
4514 case ETHER_FLOW:
4515 ether_spec = &fs->h_u.ether_spec;
4516 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4517 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4518 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4520 if (is_zero_ether_addr(ether_spec->h_source))
4521 *unused |= BIT(INNER_SRC_MAC);
4523 if (is_zero_ether_addr(ether_spec->h_dest))
4524 *unused |= BIT(INNER_DST_MAC);
4526 if (!ether_spec->h_proto)
4527 *unused |= BIT(INNER_ETH_TYPE);
4534 if ((fs->flow_type & FLOW_EXT)) {
4535 if (fs->h_ext.vlan_etype)
4536 return -EOPNOTSUPP;
4537 if (!fs->h_ext.vlan_tci)
4538 *unused |= BIT(INNER_VLAN_TAG_FST);
4540 if (fs->m_ext.vlan_tci) {
4541 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4542 return -EINVAL;
4543 }
4544 } else {
4545 *unused |= BIT(INNER_VLAN_TAG_FST);
4546 }
4548 if (fs->flow_type & FLOW_MAC_EXT) {
4549 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4550 return -EOPNOTSUPP;
4552 if (is_zero_ether_addr(fs->h_ext.h_dest))
4553 *unused |= BIT(INNER_DST_MAC);
4554 else
4555 *unused &= ~(BIT(INNER_DST_MAC));
4556 }
4558 return 0;
4559 }
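/* The rule list is kept sorted by location, so the walk below can stop
 * at the first entry whose location is not below the requested one.
 */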
4561 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4562 {
4563 struct hclge_fd_rule *rule = NULL;
4564 struct hlist_node *node2;
4566 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4567 if (rule->location >= location)
4568 break;
4569 }
4571 return rule && rule->location == location;
4572 }
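/* Insert @new_rule at @location in the sorted rule list (is_add), or
 * remove the existing entry at @location; the rule count is kept in
 * step so ethtool can report it without walking the list.
 */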
4574 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4575 struct hclge_fd_rule *new_rule,
4576 u16 location,
4577 bool is_add)
4578 {
4579 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4580 struct hlist_node *node2;
4582 if (is_add && !new_rule)
4583 return -EINVAL;
4585 hlist_for_each_entry_safe(rule, node2,
4586 &hdev->fd_rule_list, rule_node) {
4587 if (rule->location >= location)
4588 break;
4589 parent = rule;
4590 }
4592 if (rule && rule->location == location) {
4593 hlist_del(&rule->rule_node);
4594 kfree(rule);
4595 hdev->hclge_fd_rule_num--;
4597 if (!is_add)
4598 return 0;
4600 } else if (!is_add) {
4601 dev_err(&hdev->pdev->dev,
4602 "delete fail, rule %d is inexistent\n",
4603 location);
4604 return -EINVAL;
4605 }
4607 INIT_HLIST_NODE(&new_rule->rule_node);
4609 if (parent)
4610 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4611 else
4612 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4614 hdev->hclge_fd_rule_num++;
4616 return 0;
4617 }
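/* Translate the ethtool flow spec into the driver's tuple/tuple-mask
 * representation; the second switch below fills in the implicit L4
 * protocol for the SCTP/TCP/UDP flow types.
 */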
4619 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4620 struct ethtool_rx_flow_spec *fs,
4621 struct hclge_fd_rule *rule)
4622 {
4623 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4625 switch (flow_type) {
4626 case SCTP_V4_FLOW:
4627 case TCP_V4_FLOW:
4628 case UDP_V4_FLOW:
4629 rule->tuples.src_ip[3] =
4630 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4631 rule->tuples_mask.src_ip[3] =
4632 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4634 rule->tuples.dst_ip[3] =
4635 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4636 rule->tuples_mask.dst_ip[3] =
4637 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4639 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4640 rule->tuples_mask.src_port =
4641 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4643 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4644 rule->tuples_mask.dst_port =
4645 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4647 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4648 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4650 rule->tuples.ether_proto = ETH_P_IP;
4651 rule->tuples_mask.ether_proto = 0xFFFF;
4653 break;
4654 case IP_USER_FLOW:
4655 rule->tuples.src_ip[3] =
4656 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4657 rule->tuples_mask.src_ip[3] =
4658 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4660 rule->tuples.dst_ip[3] =
4661 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4662 rule->tuples_mask.dst_ip[3] =
4663 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4665 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4666 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4668 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4669 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4671 rule->tuples.ether_proto = ETH_P_IP;
4672 rule->tuples_mask.ether_proto = 0xFFFF;
4674 break;
4675 case SCTP_V6_FLOW:
4676 case TCP_V6_FLOW:
4677 case UDP_V6_FLOW:
4678 be32_to_cpu_array(rule->tuples.src_ip,
4679 fs->h_u.tcp_ip6_spec.ip6src, 4);
4680 be32_to_cpu_array(rule->tuples_mask.src_ip,
4681 fs->m_u.tcp_ip6_spec.ip6src, 4);
4683 be32_to_cpu_array(rule->tuples.dst_ip,
4684 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4685 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4686 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4688 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4689 rule->tuples_mask.src_port =
4690 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4692 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4693 rule->tuples_mask.dst_port =
4694 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4696 rule->tuples.ether_proto = ETH_P_IPV6;
4697 rule->tuples_mask.ether_proto = 0xFFFF;
4699 break;
4700 case IPV6_USER_FLOW:
4701 be32_to_cpu_array(rule->tuples.src_ip,
4702 fs->h_u.usr_ip6_spec.ip6src, 4);
4703 be32_to_cpu_array(rule->tuples_mask.src_ip,
4704 fs->m_u.usr_ip6_spec.ip6src, 4);
4706 be32_to_cpu_array(rule->tuples.dst_ip,
4707 fs->h_u.usr_ip6_spec.ip6dst, 4);
4708 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4709 fs->m_u.usr_ip6_spec.ip6dst, 4);
4711 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4712 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4714 rule->tuples.ether_proto = ETH_P_IPV6;
4715 rule->tuples_mask.ether_proto = 0xFFFF;
4717 break;
4718 case ETHER_FLOW:
4719 ether_addr_copy(rule->tuples.src_mac,
4720 fs->h_u.ether_spec.h_source);
4721 ether_addr_copy(rule->tuples_mask.src_mac,
4722 fs->m_u.ether_spec.h_source);
4724 ether_addr_copy(rule->tuples.dst_mac,
4725 fs->h_u.ether_spec.h_dest);
4726 ether_addr_copy(rule->tuples_mask.dst_mac,
4727 fs->m_u.ether_spec.h_dest);
4729 rule->tuples.ether_proto =
4730 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4731 rule->tuples_mask.ether_proto =
4732 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4734 break;
4735 default:
4736 return -EOPNOTSUPP;
4737 }
4739 switch (flow_type) {
4740 case SCTP_V4_FLOW:
4741 case SCTP_V6_FLOW:
4742 rule->tuples.ip_proto = IPPROTO_SCTP;
4743 rule->tuples_mask.ip_proto = 0xFF;
4744 break;
4745 case TCP_V4_FLOW:
4746 case TCP_V6_FLOW:
4747 rule->tuples.ip_proto = IPPROTO_TCP;
4748 rule->tuples_mask.ip_proto = 0xFF;
4749 break;
4750 case UDP_V4_FLOW:
4751 case UDP_V6_FLOW:
4752 rule->tuples.ip_proto = IPPROTO_UDP;
4753 rule->tuples_mask.ip_proto = 0xFF;
4754 break;
4755 default:
4756 break;
4757 }
4759 if ((fs->flow_type & FLOW_EXT)) {
4760 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4761 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4762 }
4764 if (fs->flow_type & FLOW_MAC_EXT) {
4765 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4766 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4767 }
4769 return 0;
4770 }
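/* ethtool -N entry point: validate the spec, resolve the destination
 * vport/queue from the ring cookie, then program the action, the TCAM
 * key and finally the software rule list.
 */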
4772 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4773 struct ethtool_rxnfc *cmd)
4774 {
4775 struct hclge_vport *vport = hclge_get_vport(handle);
4776 struct hclge_dev *hdev = vport->back;
4777 u16 dst_vport_id = 0, q_index = 0;
4778 struct ethtool_rx_flow_spec *fs;
4779 struct hclge_fd_rule *rule;
4780 u32 unused = 0;
4781 u8 action;
4782 int ret;
4784 if (!hnae3_dev_fd_supported(hdev))
4785 return -EOPNOTSUPP;
4787 if (!hdev->fd_en) {
4788 dev_warn(&hdev->pdev->dev,
4789 "Please enable flow director first\n");
4790 return -EOPNOTSUPP;
4791 }
4793 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4795 ret = hclge_fd_check_spec(hdev, fs, &unused);
4796 if (ret) {
4797 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4798 return ret;
4799 }
4801 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4802 action = HCLGE_FD_ACTION_DROP_PACKET;
4803 } else {
4804 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4805 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4806 u16 tqps;
4808 if (vf > hdev->num_req_vfs) {
4809 dev_err(&hdev->pdev->dev,
4810 "Error: vf id (%d) > max vf num (%d)\n",
4811 vf, hdev->num_req_vfs);
4812 return -EINVAL;
4813 }
4815 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4816 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4818 if (ring >= tqps) {
4819 dev_err(&hdev->pdev->dev,
4820 "Error: queue id (%d) > max tqp num (%d)\n",
4821 ring, tqps - 1);
4822 return -EINVAL;
4823 }
4825 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4826 q_index = ring;
4827 }
4829 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4830 if (!rule)
4831 return -ENOMEM;
4833 ret = hclge_fd_get_tuple(hdev, fs, rule);
4834 if (ret)
4835 goto free_rule;
4837 rule->flow_type = fs->flow_type;
4839 rule->location = fs->location;
4840 rule->unused_tuple = unused;
4841 rule->vf_id = dst_vport_id;
4842 rule->queue_id = q_index;
4843 rule->action = action;
4845 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4846 if (ret)
4847 goto free_rule;
4849 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4850 if (ret)
4851 goto free_rule;
4853 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4854 if (ret)
4855 goto free_rule;
4857 return ret;
4859 free_rule:
4860 kfree(rule);
4861 return ret;
4862 }
4864 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4865 struct ethtool_rxnfc *cmd)
4866 {
4867 struct hclge_vport *vport = hclge_get_vport(handle);
4868 struct hclge_dev *hdev = vport->back;
4869 struct ethtool_rx_flow_spec *fs;
4870 int ret;
4872 if (!hnae3_dev_fd_supported(hdev))
4873 return -EOPNOTSUPP;
4875 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4877 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4878 return -EINVAL;
4880 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4881 dev_err(&hdev->pdev->dev,
4882 "Delete fail, rule %d is inexistent\n",
4883 fs->location);
4884 return -ENOENT;
4885 }
4887 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4888 fs->location, NULL, false);
4889 if (ret)
4890 return ret;
4892 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4893 false);
4894 }
4896 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4897 bool clear_list)
4898 {
4899 struct hclge_vport *vport = hclge_get_vport(handle);
4900 struct hclge_dev *hdev = vport->back;
4901 struct hclge_fd_rule *rule;
4902 struct hlist_node *node;
4904 if (!hnae3_dev_fd_supported(hdev))
4905 return;
4907 if (clear_list) {
4908 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4909 rule_node) {
4910 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4911 rule->location, NULL, false);
4912 hlist_del(&rule->rule_node);
4913 kfree(rule);
4914 hdev->hclge_fd_rule_num--;
4915 }
4916 } else {
4917 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4918 rule_node)
4919 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4920 rule->location, NULL, false);
4921 }
4922 }
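/* Replay the software rule list into hardware after a reset; rules
 * that fail to restore are dropped from the list rather than failing
 * the whole reset.
 */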
4924 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4925 {
4926 struct hclge_vport *vport = hclge_get_vport(handle);
4927 struct hclge_dev *hdev = vport->back;
4928 struct hclge_fd_rule *rule;
4929 struct hlist_node *node;
4930 int ret;
4932 /* Return ok here, because reset error handling will check this
4933 * return value. If error is returned here, the reset process will
4934 * fail.
4935 */
4936 if (!hnae3_dev_fd_supported(hdev))
4937 return 0;
4939 /* if fd is disabled, should not restore it when reset */
4940 if (!hdev->fd_en)
4941 return 0;
4943 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4944 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4945 if (!ret)
4946 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4948 if (ret) {
4949 dev_warn(&hdev->pdev->dev,
4950 "Restore rule %d failed, remove it\n",
4951 rule->location);
4952 hlist_del(&rule->rule_node);
4953 kfree(rule);
4954 hdev->hclge_fd_rule_num--;
4955 }
4956 }
4957 return 0;
4958 }
4960 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4961 struct ethtool_rxnfc *cmd)
4962 {
4963 struct hclge_vport *vport = hclge_get_vport(handle);
4964 struct hclge_dev *hdev = vport->back;
4966 if (!hnae3_dev_fd_supported(hdev))
4967 return -EOPNOTSUPP;
4969 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4970 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4972 return 0;
4973 }
4975 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4976 struct ethtool_rxnfc *cmd)
4977 {
4978 struct hclge_vport *vport = hclge_get_vport(handle);
4979 struct hclge_fd_rule *rule = NULL;
4980 struct hclge_dev *hdev = vport->back;
4981 struct ethtool_rx_flow_spec *fs;
4982 struct hlist_node *node2;
4984 if (!hnae3_dev_fd_supported(hdev))
4985 return -EOPNOTSUPP;
4987 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4989 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4990 if (rule->location >= fs->location)
4991 break;
4992 }
4994 if (!rule || fs->location != rule->location)
4995 return -ENOENT;
4997 fs->flow_type = rule->flow_type;
4998 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4999 case SCTP_V4_FLOW:
5000 case TCP_V4_FLOW:
5001 case UDP_V4_FLOW:
5002 fs->h_u.tcp_ip4_spec.ip4src =
5003 cpu_to_be32(rule->tuples.src_ip[3]);
5004 fs->m_u.tcp_ip4_spec.ip4src =
5005 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5006 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5008 fs->h_u.tcp_ip4_spec.ip4dst =
5009 cpu_to_be32(rule->tuples.dst_ip[3]);
5010 fs->m_u.tcp_ip4_spec.ip4dst =
5011 rule->unused_tuple & BIT(INNER_DST_IP) ?
5012 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5014 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5015 fs->m_u.tcp_ip4_spec.psrc =
5016 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5017 0 : cpu_to_be16(rule->tuples_mask.src_port);
5019 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5020 fs->m_u.tcp_ip4_spec.pdst =
5021 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5022 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5024 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5025 fs->m_u.tcp_ip4_spec.tos =
5026 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5027 0 : rule->tuples_mask.ip_tos;
5029 break;
5030 case IP_USER_FLOW:
5031 fs->h_u.usr_ip4_spec.ip4src =
5032 cpu_to_be32(rule->tuples.src_ip[3]);
5033 fs->m_u.usr_ip4_spec.ip4src =
5034 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5035 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5037 fs->h_u.usr_ip4_spec.ip4dst =
5038 cpu_to_be32(rule->tuples.dst_ip[3]);
5039 fs->m_u.usr_ip4_spec.ip4dst =
5040 rule->unused_tuple & BIT(INNER_DST_IP) ?
5041 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5043 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5044 fs->m_u.usr_ip4_spec.tos =
5045 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5046 0 : rule->tuples_mask.ip_tos;
5048 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5049 fs->m_u.usr_ip4_spec.proto =
5050 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5051 0 : rule->tuples_mask.ip_proto;
5053 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5055 break;
5056 case SCTP_V6_FLOW:
5057 case TCP_V6_FLOW:
5058 case UDP_V6_FLOW:
5059 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5060 rule->tuples.src_ip, 4);
5061 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5062 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5063 else
5064 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5065 rule->tuples_mask.src_ip, 4);
5067 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5068 rule->tuples.dst_ip, 4);
5069 if (rule->unused_tuple & BIT(INNER_DST_IP))
5070 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5071 else
5072 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5073 rule->tuples_mask.dst_ip, 4);
5075 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5076 fs->m_u.tcp_ip6_spec.psrc =
5077 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5078 0 : cpu_to_be16(rule->tuples_mask.src_port);
5080 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5081 fs->m_u.tcp_ip6_spec.pdst =
5082 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5083 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5085 break;
5086 case IPV6_USER_FLOW:
5087 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5088 rule->tuples.src_ip, 4);
5089 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5090 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5091 else
5092 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5093 rule->tuples_mask.src_ip, 4);
5095 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5096 rule->tuples.dst_ip, 4);
5097 if (rule->unused_tuple & BIT(INNER_DST_IP))
5098 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5099 else
5100 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5101 rule->tuples_mask.dst_ip, 4);
5103 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5104 fs->m_u.usr_ip6_spec.l4_proto =
5105 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5106 0 : rule->tuples_mask.ip_proto;
5108 break;
5109 case ETHER_FLOW:
5110 ether_addr_copy(fs->h_u.ether_spec.h_source,
5111 rule->tuples.src_mac);
5112 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5113 eth_zero_addr(fs->m_u.ether_spec.h_source);
5114 else
5115 ether_addr_copy(fs->m_u.ether_spec.h_source,
5116 rule->tuples_mask.src_mac);
5118 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5119 rule->tuples.dst_mac);
5120 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5121 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5122 else
5123 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5124 rule->tuples_mask.dst_mac);
5126 fs->h_u.ether_spec.h_proto =
5127 cpu_to_be16(rule->tuples.ether_proto);
5128 fs->m_u.ether_spec.h_proto =
5129 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5130 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5132 break;
5133 default:
5134 return -EOPNOTSUPP;
5135 }
5137 if (fs->flow_type & FLOW_EXT) {
5138 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5139 fs->m_ext.vlan_tci =
5140 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5141 cpu_to_be16(VLAN_VID_MASK) :
5142 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5143 }
5145 if (fs->flow_type & FLOW_MAC_EXT) {
5146 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5147 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5148 eth_zero_addr(fs->m_ext.h_dest);
5149 else
5150 ether_addr_copy(fs->m_ext.h_dest,
5151 rule->tuples_mask.dst_mac);
5152 }
5154 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5155 fs->ring_cookie = RX_CLS_FLOW_DISC;
5156 } else {
5157 u64 vf_id;
5159 fs->ring_cookie = rule->queue_id;
5160 vf_id = rule->vf_id;
5161 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5162 fs->ring_cookie |= vf_id;
5163 }
5165 return 0;
5166 }
5168 static int hclge_get_all_rules(struct hnae3_handle *handle,
5169 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5170 {
5171 struct hclge_vport *vport = hclge_get_vport(handle);
5172 struct hclge_dev *hdev = vport->back;
5173 struct hclge_fd_rule *rule;
5174 struct hlist_node *node2;
5175 int cnt = 0;
5177 if (!hnae3_dev_fd_supported(hdev))
5178 return -EOPNOTSUPP;
5180 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5182 hlist_for_each_entry_safe(rule, node2,
5183 &hdev->fd_rule_list, rule_node) {
5184 if (cnt == cmd->rule_cnt)
5185 return -EMSGSIZE;
5187 rule_locs[cnt] = rule->location;
5188 cnt++;
5189 }
5191 cmd->rule_cnt = cnt;
5193 return 0;
5194 }
5196 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5197 {
5198 struct hclge_vport *vport = hclge_get_vport(handle);
5199 struct hclge_dev *hdev = vport->back;
5201 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5202 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5203 }
5205 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5206 {
5207 struct hclge_vport *vport = hclge_get_vport(handle);
5208 struct hclge_dev *hdev = vport->back;
5210 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5211 }
5213 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5214 {
5215 struct hclge_vport *vport = hclge_get_vport(handle);
5216 struct hclge_dev *hdev = vport->back;
5218 return hdev->reset_count;
5219 }
5221 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5222 {
5223 struct hclge_vport *vport = hclge_get_vport(handle);
5224 struct hclge_dev *hdev = vport->back;
5226 hdev->fd_en = enable;
5227 if (!enable)
5228 hclge_del_all_fd_entries(handle, false);
5229 else
5230 hclge_restore_fd_entries(handle);
5231 }
5233 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5234 {
5235 struct hclge_desc desc;
5236 struct hclge_config_mac_mode_cmd *req =
5237 (struct hclge_config_mac_mode_cmd *)desc.data;
5238 u32 loop_en = 0;
5239 int ret;
5241 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5242 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5243 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5244 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5245 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5246 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5247 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5248 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5249 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5250 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5251 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5252 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5253 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5254 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5255 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5256 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5258 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5259 if (ret)
5260 dev_err(&hdev->pdev->dev,
5261 "mac enable fail, ret =%d.\n", ret);
5262 }
5264 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5265 {
5266 struct hclge_config_mac_mode_cmd *req;
5267 struct hclge_desc desc;
5268 u32 loop_en;
5269 int ret;
5271 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5272 /* 1 Read out the MAC mode config at first */
5273 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5274 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5275 if (ret) {
5276 dev_err(&hdev->pdev->dev,
5277 "mac loopback get fail, ret =%d.\n", ret);
5278 return ret;
5279 }
5281 /* 2 Then setup the loopback flag */
5282 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5283 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5284 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5285 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5287 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5289 /* 3 Config mac work mode with loopback flag
5290 * and its original configure parameters
5291 */
5292 hclge_cmd_reuse_desc(&desc, false);
5293 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5294 if (ret)
5295 dev_err(&hdev->pdev->dev,
5296 "mac loopback set fail, ret =%d.\n", ret);
5298 return ret;
5299 }
5300 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5301 enum hnae3_loop loop_mode)
5302 {
5303 #define HCLGE_SERDES_RETRY_MS 10
5304 #define HCLGE_SERDES_RETRY_NUM 100
5306 #define HCLGE_MAC_LINK_STATUS_MS 20
5307 #define HCLGE_MAC_LINK_STATUS_NUM 10
5308 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5309 #define HCLGE_MAC_LINK_STATUS_UP 1
5311 struct hclge_serdes_lb_cmd *req;
5312 struct hclge_desc desc;
5313 int mac_link_ret = 0;
5314 int ret, i = 0;
5315 u8 loop_mode_b;
5317 req = (struct hclge_serdes_lb_cmd *)desc.data;
5318 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5320 switch (loop_mode) {
5321 case HNAE3_LOOP_SERIAL_SERDES:
5322 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5323 break;
5324 case HNAE3_LOOP_PARALLEL_SERDES:
5325 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5326 break;
5327 default:
5328 dev_err(&hdev->pdev->dev,
5329 "unsupported serdes loopback mode %d\n", loop_mode);
5330 return -ENOTSUPP;
5331 }
5333 if (en) {
5334 req->enable = loop_mode_b;
5335 req->mask = loop_mode_b;
5336 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5337 } else {
5338 req->mask = loop_mode_b;
5339 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5340 }
5342 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5343 if (ret) {
5344 dev_err(&hdev->pdev->dev,
5345 "serdes loopback set fail, ret = %d\n", ret);
5346 return ret;
5347 }
5349 do {
5350 msleep(HCLGE_SERDES_RETRY_MS);
5351 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5352 true);
5353 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5354 if (ret) {
5355 dev_err(&hdev->pdev->dev,
5356 "serdes loopback get, ret = %d\n", ret);
5357 return ret;
5358 }
5359 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5360 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5362 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5363 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5364 return -EBUSY;
5365 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5366 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5367 return -EIO;
5368 }
5370 hclge_cfg_mac_mode(hdev, en);
5372 i = 0;
5373 do {
5374 /* serdes internal loopback, independent of the network cable. */
5375 msleep(HCLGE_MAC_LINK_STATUS_MS);
5376 ret = hclge_get_mac_link_status(hdev);
5377 if (ret == mac_link_ret)
5378 return 0;
5379 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5381 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5383 return -EBUSY;
5384 }
5386 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5387 int stream_id, bool enable)
5388 {
5389 struct hclge_desc desc;
5390 struct hclge_cfg_com_tqp_queue_cmd *req =
5391 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5392 int ret;
5394 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5395 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5396 req->stream_id = cpu_to_le16(stream_id);
5397 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5399 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5400 if (ret)
5401 dev_err(&hdev->pdev->dev,
5402 "Tqp enable fail, status =%d.\n", ret);
5404 return ret;
5405 }
5406 static int hclge_set_loopback(struct hnae3_handle *handle,
5407 enum hnae3_loop loop_mode, bool en)
5408 {
5409 struct hclge_vport *vport = hclge_get_vport(handle);
5410 struct hnae3_knic_private_info *kinfo;
5411 struct hclge_dev *hdev = vport->back;
5412 int i, ret;
5414 switch (loop_mode) {
5415 case HNAE3_LOOP_APP:
5416 ret = hclge_set_app_loopback(hdev, en);
5417 break;
5418 case HNAE3_LOOP_SERIAL_SERDES:
5419 case HNAE3_LOOP_PARALLEL_SERDES:
5420 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5421 break;
5422 default:
5423 ret = -ENOTSUPP;
5424 dev_err(&hdev->pdev->dev,
5425 "loop_mode %d is not supported\n", loop_mode);
5426 break;
5427 }
5429 if (ret)
5430 return ret;
5432 kinfo = &vport->nic.kinfo;
5433 for (i = 0; i < kinfo->num_tqps; i++) {
5434 ret = hclge_tqp_enable(hdev, i, 0, en);
5435 if (ret)
5436 return ret;
5437 }
5439 return 0;
5440 }
5442 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5443 {
5444 struct hclge_vport *vport = hclge_get_vport(handle);
5445 struct hnae3_knic_private_info *kinfo;
5446 struct hnae3_queue *queue;
5447 struct hclge_tqp *tqp;
5448 int i;
5450 kinfo = &vport->nic.kinfo;
5451 for (i = 0; i < kinfo->num_tqps; i++) {
5452 queue = handle->kinfo.tqp[i];
5453 tqp = container_of(queue, struct hclge_tqp, q);
5454 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5455 }
5456 }
5458 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5459 {
5460 struct hclge_vport *vport = hclge_get_vport(handle);
5461 struct hclge_dev *hdev = vport->back;
5463 if (enable) {
5464 mod_timer(&hdev->service_timer, jiffies + HZ);
5465 } else {
5466 del_timer_sync(&hdev->service_timer);
5467 cancel_work_sync(&hdev->service_task);
5468 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5469 }
5470 }
5472 static int hclge_ae_start(struct hnae3_handle *handle)
5473 {
5474 struct hclge_vport *vport = hclge_get_vport(handle);
5475 struct hclge_dev *hdev = vport->back;
5477 /* mac enable */
5478 hclge_cfg_mac_mode(hdev, true);
5479 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5480 hdev->hw.mac.link = 0;
5482 /* reset tqp stats */
5483 hclge_reset_tqp_stats(handle);
5485 hclge_mac_start_phy(hdev);
5487 return 0;
5488 }
5490 static void hclge_ae_stop(struct hnae3_handle *handle)
5491 {
5492 struct hclge_vport *vport = hclge_get_vport(handle);
5493 struct hclge_dev *hdev = vport->back;
5494 int i;
5496 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5498 /* If it is not PF reset, the firmware will disable the MAC,
5499 * so it only needs to stop the phy here.
5500 */
5501 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5502 hdev->reset_type != HNAE3_FUNC_RESET) {
5503 hclge_mac_stop_phy(hdev);
5504 return;
5505 }
5507 for (i = 0; i < handle->kinfo.num_tqps; i++)
5508 hclge_reset_tqp(handle, i);
5510 /* Mac disable */
5511 hclge_cfg_mac_mode(hdev, false);
5513 hclge_mac_stop_phy(hdev);
5515 /* reset tqp stats */
5516 hclge_reset_tqp_stats(handle);
5517 hclge_update_link_status(hdev);
5518 }
5520 int hclge_vport_start(struct hclge_vport *vport)
5521 {
5522 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5523 vport->last_active_jiffies = jiffies;
5525 return 0;
5526 }
5528 void hclge_vport_stop(struct hclge_vport *vport)
5529 {
5530 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5531 }
5533 static int hclge_client_start(struct hnae3_handle *handle)
5534 {
5535 struct hclge_vport *vport = hclge_get_vport(handle);
5537 return hclge_vport_start(vport);
5538 }
5540 static void hclge_client_stop(struct hnae3_handle *handle)
5541 {
5542 struct hclge_vport *vport = hclge_get_vport(handle);
5544 hclge_vport_stop(vport);
5545 }
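/* Decode a MAC-VLAN table response: the firmware returns an
 * opcode-specific resp_code (0/1 = ok or duplicate, 2/3 =
 * unicast/multicast overflow on add, 1 = miss on remove/lookup),
 * which is mapped onto standard errnos here.
 */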
5546 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5547 u16 cmdq_resp, u8 resp_code,
5548 enum hclge_mac_vlan_tbl_opcode op)
5549 {
5550 struct hclge_dev *hdev = vport->back;
5551 int return_status = -EIO;
5553 if (cmdq_resp) {
5554 dev_err(&hdev->pdev->dev,
5555 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5556 cmdq_resp);
5557 return -EIO;
5558 }
5560 if (op == HCLGE_MAC_VLAN_ADD) {
5561 if ((!resp_code) || (resp_code == 1)) {
5562 return_status = 0;
5563 } else if (resp_code == 2) {
5564 return_status = -ENOSPC;
5565 dev_err(&hdev->pdev->dev,
5566 "add mac addr failed for uc_overflow.\n");
5567 } else if (resp_code == 3) {
5568 return_status = -ENOSPC;
5569 dev_err(&hdev->pdev->dev,
5570 "add mac addr failed for mc_overflow.\n");
5571 } else {
5572 dev_err(&hdev->pdev->dev,
5573 "add mac addr failed for undefined, code=%d.\n",
5574 resp_code);
5575 }
5576 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5577 if (!resp_code) {
5578 return_status = 0;
5579 } else if (resp_code == 1) {
5580 return_status = -ENOENT;
5581 dev_dbg(&hdev->pdev->dev,
5582 "remove mac addr failed for miss.\n");
5583 } else {
5584 dev_err(&hdev->pdev->dev,
5585 "remove mac addr failed for undefined, code=%d.\n",
5586 resp_code);
5587 }
5588 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5589 if (!resp_code) {
5590 return_status = 0;
5591 } else if (resp_code == 1) {
5592 return_status = -ENOENT;
5593 dev_dbg(&hdev->pdev->dev,
5594 "lookup mac addr failed for miss.\n");
5595 } else {
5596 dev_err(&hdev->pdev->dev,
5597 "lookup mac addr failed for undefined, code=%d.\n",
5598 resp_code);
5599 }
5600 } else {
5601 return_status = -EINVAL;
5602 dev_err(&hdev->pdev->dev,
5603 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5604 op);
5605 }
5607 return return_status;
5608 }
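/* A multicast MAC-VLAN entry carries a 256-bit function bitmap split
 * across desc[1] (functions 0-191) and desc[2] (functions 192-255);
 * set or clear the bit for @vfid.
 */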
5610 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5611 {
5612 int word_num;
5613 int bit_num;
5615 if (vfid > 255 || vfid < 0)
5616 return -EIO;
5618 if (vfid >= 0 && vfid <= 191) {
5619 word_num = vfid / 32;
5620 bit_num = vfid % 32;
5621 if (clr)
5622 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5623 else
5624 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5625 } else {
5626 word_num = (vfid - 192) / 32;
5627 bit_num = vfid % 32;
5628 if (clr)
5629 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5630 else
5631 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5632 }
5634 return 0;
5635 }
5637 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5638 {
5639 #define HCLGE_DESC_NUMBER 3
5640 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5641 int i, j;
5643 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5644 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5645 if (desc[i].data[j])
5646 return false;
5648 return true;
5649 }
5651 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5652 const u8 *addr, bool is_mc)
5653 {
5654 const unsigned char *mac_addr = addr;
5655 u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
5656 (mac_addr[2] << 16) | (mac_addr[3] << 24);
5657 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5659 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5660 if (is_mc) {
5661 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5662 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5663 }
5665 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5666 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5667 }
5669 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5670 struct hclge_mac_vlan_tbl_entry_cmd *req)
5671 {
5672 struct hclge_dev *hdev = vport->back;
5673 struct hclge_desc desc;
5674 u8 resp_code;
5675 u16 retval;
5676 int ret;
5678 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5680 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5682 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5683 if (ret) {
5684 dev_err(&hdev->pdev->dev,
5685 "del mac addr failed for cmd_send, ret =%d.\n",
5686 ret);
5687 return ret;
5688 }
5689 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5690 retval = le16_to_cpu(desc.retval);
5692 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5693 HCLGE_MAC_VLAN_REMOVE);
5694 }
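/* Look up @req in the MAC-VLAN table. Multicast lookups need the full
 * three-descriptor chain (entry plus the 256-bit function bitmap);
 * unicast lookups fit in a single descriptor.
 */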
5696 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5697 struct hclge_mac_vlan_tbl_entry_cmd *req,
5698 struct hclge_desc *desc,
5699 bool is_mc)
5700 {
5701 struct hclge_dev *hdev = vport->back;
5702 u8 resp_code;
5703 u16 retval;
5704 int ret;
5706 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5707 if (is_mc) {
5708 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5709 memcpy(desc[0].data,
5710 req,
5711 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5712 hclge_cmd_setup_basic_desc(&desc[1],
5713 HCLGE_OPC_MAC_VLAN_ADD,
5714 true);
5715 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5716 hclge_cmd_setup_basic_desc(&desc[2],
5717 HCLGE_OPC_MAC_VLAN_ADD,
5718 true);
5719 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5720 } else {
5721 memcpy(desc[0].data,
5722 req,
5723 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5724 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5725 }
5726 if (ret) {
5727 dev_err(&hdev->pdev->dev,
5728 "lookup mac addr failed for cmd_send, ret =%d.\n",
5729 ret);
5730 return ret;
5731 }
5732 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5733 retval = le16_to_cpu(desc[0].retval);
5735 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5736 HCLGE_MAC_VLAN_LKUP);
5737 }
5739 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5740 struct hclge_mac_vlan_tbl_entry_cmd *req,
5741 struct hclge_desc *mc_desc)
5742 {
5743 struct hclge_dev *hdev = vport->back;
5744 int cfg_status;
5745 u8 resp_code;
5746 u16 retval;
5747 int ret;
5749 if (!mc_desc) {
5750 struct hclge_desc desc;
5752 hclge_cmd_setup_basic_desc(&desc,
5753 HCLGE_OPC_MAC_VLAN_ADD,
5754 false);
5755 memcpy(desc.data, req,
5756 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5757 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5758 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5759 retval = le16_to_cpu(desc.retval);
5761 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5762 resp_code,
5763 HCLGE_MAC_VLAN_ADD);
5764 } else {
5765 hclge_cmd_reuse_desc(&mc_desc[0], false);
5766 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5767 hclge_cmd_reuse_desc(&mc_desc[1], false);
5768 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5769 hclge_cmd_reuse_desc(&mc_desc[2], false);
5770 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5771 memcpy(mc_desc[0].data, req,
5772 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5773 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5774 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5775 retval = le16_to_cpu(mc_desc[0].retval);
5777 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5778 resp_code,
5779 HCLGE_MAC_VLAN_ADD);
5780 }
5782 if (ret) {
5783 dev_err(&hdev->pdev->dev,
5784 "add mac addr failed for cmd_send, ret =%d.\n",
5785 ret);
5786 return ret;
5787 }
5789 return cfg_status;
5790 }
5792 static int hclge_init_umv_space(struct hclge_dev *hdev)
5793 {
5794 u16 allocated_size = 0;
5795 int ret;
5797 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5798 true);
5799 if (ret)
5800 return ret;
5802 if (allocated_size < hdev->wanted_umv_size)
5803 dev_warn(&hdev->pdev->dev,
5804 "Alloc umv space failed, want %d, get %d\n",
5805 hdev->wanted_umv_size, allocated_size);
5807 mutex_init(&hdev->umv_mutex);
5808 hdev->max_umv_size = allocated_size;
5809 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5810 hdev->share_umv_size = hdev->priv_umv_size +
5811 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5813 return 0;
5814 }
5816 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5817 {
5818 int ret;
5820 if (hdev->max_umv_size > 0) {
5821 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5822 false);
5823 if (ret)
5824 return ret;
5825 hdev->max_umv_size = 0;
5826 }
5827 mutex_destroy(&hdev->umv_mutex);
5829 return 0;
5830 }
5832 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5833 u16 *allocated_size, bool is_alloc)
5834 {
5835 struct hclge_umv_spc_alc_cmd *req;
5836 struct hclge_desc desc;
5837 int ret;
5839 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5840 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5841 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5842 req->space_size = cpu_to_le32(space_size);
5844 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5845 if (ret) {
5846 dev_err(&hdev->pdev->dev,
5847 "%s umv space failed for cmd_send, ret =%d\n",
5848 is_alloc ? "allocate" : "free", ret);
5849 return ret;
5850 }
5852 if (is_alloc && allocated_size)
5853 *allocated_size = le32_to_cpu(desc.data[1]);
5855 return 0;
5856 }
5858 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5859 {
5860 struct hclge_vport *vport;
5861 int i;
5863 for (i = 0; i < hdev->num_alloc_vport; i++) {
5864 vport = &hdev->vport[i];
5865 vport->used_umv_num = 0;
5866 }
5868 mutex_lock(&hdev->umv_mutex);
5869 hdev->share_umv_size = hdev->priv_umv_size +
5870 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5871 mutex_unlock(&hdev->umv_mutex);
5872 }
5874 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5875 {
5876 struct hclge_dev *hdev = vport->back;
5877 bool is_full;
5879 mutex_lock(&hdev->umv_mutex);
5880 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5881 hdev->share_umv_size == 0);
5882 mutex_unlock(&hdev->umv_mutex);
5884 return is_full;
5885 }
5887 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5888 {
5889 struct hclge_dev *hdev = vport->back;
5891 mutex_lock(&hdev->umv_mutex);
5892 if (is_free) {
5893 if (vport->used_umv_num > hdev->priv_umv_size)
5894 hdev->share_umv_size++;
5896 if (vport->used_umv_num > 0)
5897 vport->used_umv_num--;
5898 } else {
5899 if (vport->used_umv_num >= hdev->priv_umv_size &&
5900 hdev->share_umv_size > 0)
5901 hdev->share_umv_size--;
5902 vport->used_umv_num++;
5903 }
5904 mutex_unlock(&hdev->umv_mutex);
5905 }
5907 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5908 const unsigned char *addr)
5909 {
5910 struct hclge_vport *vport = hclge_get_vport(handle);
5912 return hclge_add_uc_addr_common(vport, addr);
5913 }
5915 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5916 const unsigned char *addr)
5917 {
5918 struct hclge_dev *hdev = vport->back;
5919 struct hclge_mac_vlan_tbl_entry_cmd req;
5920 struct hclge_desc desc;
5921 u16 egress_port = 0;
5922 int ret;
5924 /* mac addr check */
5925 if (is_zero_ether_addr(addr) ||
5926 is_broadcast_ether_addr(addr) ||
5927 is_multicast_ether_addr(addr)) {
5928 dev_err(&hdev->pdev->dev,
5929 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5930 addr,
5931 is_zero_ether_addr(addr),
5932 is_broadcast_ether_addr(addr),
5933 is_multicast_ether_addr(addr));
5934 return -EINVAL;
5935 }
5937 memset(&req, 0, sizeof(req));
5939 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5940 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5942 req.egress_port = cpu_to_le16(egress_port);
5944 hclge_prepare_mac_addr(&req, addr, false);
5946 /* Lookup the mac address in the mac_vlan table, and add
5947 * it if the entry is inexistent. Repeated unicast entry
5948 * is not allowed in the mac vlan table.
5949 */
5950 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5951 if (ret == -ENOENT) {
5952 if (!hclge_is_umv_space_full(vport)) {
5953 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5954 if (!ret)
5955 hclge_update_umv_space(vport, false);
5956 return ret;
5957 }
5959 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5960 hdev->priv_umv_size);
5962 return -ENOSPC;
5963 }
5965 /* check if we just hit the duplicate */
5966 if (!ret) {
5967 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
5968 vport->vport_id, addr);
5969 return 0;
5970 }
5972 dev_err(&hdev->pdev->dev,
5973 "PF failed to add unicast entry(%pM) in the MAC table\n",
5974 addr);
5976 return ret;
5977 }
5979 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5980 const unsigned char *addr)
5981 {
5982 struct hclge_vport *vport = hclge_get_vport(handle);
5984 return hclge_rm_uc_addr_common(vport, addr);
5985 }
5987 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5988 const unsigned char *addr)
5989 {
5990 struct hclge_dev *hdev = vport->back;
5991 struct hclge_mac_vlan_tbl_entry_cmd req;
5992 int ret;
5994 /* mac addr check */
5995 if (is_zero_ether_addr(addr) ||
5996 is_broadcast_ether_addr(addr) ||
5997 is_multicast_ether_addr(addr)) {
5998 dev_dbg(&hdev->pdev->dev,
5999 "Remove mac err! invalid mac:%pM.\n",
6000 addr);
6001 return -EINVAL;
6002 }
6004 memset(&req, 0, sizeof(req));
6005 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6006 hclge_prepare_mac_addr(&req, addr, false);
6007 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6008 if (!ret)
6009 hclge_update_umv_space(vport, true);
6011 return ret;
6012 }
6014 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6015 const unsigned char *addr)
6016 {
6017 struct hclge_vport *vport = hclge_get_vport(handle);
6019 return hclge_add_mc_addr_common(vport, addr);
6020 }
6022 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6023 const unsigned char *addr)
6024 {
6025 struct hclge_dev *hdev = vport->back;
6026 struct hclge_mac_vlan_tbl_entry_cmd req;
6027 struct hclge_desc desc[3];
6028 int status;
6030 /* mac addr check */
6031 if (!is_multicast_ether_addr(addr)) {
6032 dev_err(&hdev->pdev->dev,
6033 "Add mc mac err! invalid mac:%pM.\n",
6034 addr);
6035 return -EINVAL;
6036 }
6037 memset(&req, 0, sizeof(req));
6038 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6039 hclge_prepare_mac_addr(&req, addr, true);
6040 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6041 if (!status) {
6042 /* This mac addr exist, update VFID for it */
6043 hclge_update_desc_vfid(desc, vport->vport_id, false);
6044 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6045 } else {
6046 /* This mac addr do not exist, add new entry for it */
6047 memset(desc[0].data, 0, sizeof(desc[0].data));
6048 memset(desc[1].data, 0, sizeof(desc[0].data));
6049 memset(desc[2].data, 0, sizeof(desc[0].data));
6050 hclge_update_desc_vfid(desc, vport->vport_id, false);
6051 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6052 }
6054 if (status == -ENOSPC)
6055 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6057 return status;
6058 }
6060 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6061 const unsigned char *addr)
6062 {
6063 struct hclge_vport *vport = hclge_get_vport(handle);
6065 return hclge_rm_mc_addr_common(vport, addr);
6066 }
6068 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6069 const unsigned char *addr)
6070 {
6071 struct hclge_dev *hdev = vport->back;
6072 struct hclge_mac_vlan_tbl_entry_cmd req;
6073 enum hclge_cmd_status status;
6074 struct hclge_desc desc[3];
6076 /* mac addr check */
6077 if (!is_multicast_ether_addr(addr)) {
6078 dev_dbg(&hdev->pdev->dev,
6079 "Remove mc mac err! invalid mac:%pM.\n",
6080 addr);
6081 return -EINVAL;
6082 }
6084 memset(&req, 0, sizeof(req));
6085 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6086 hclge_prepare_mac_addr(&req, addr, true);
6087 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6088 if (!status) {
6089 /* This mac addr exist, remove this handle's VFID for it */
6090 hclge_update_desc_vfid(desc, vport->vport_id, true);
6092 if (hclge_is_all_function_id_zero(desc))
6093 /* All the vfid is zero, so need to delete this entry */
6094 status = hclge_remove_mac_vlan_tbl(vport, &req);
6095 else
6096 /* Not all the vfid is zero, update the vfid */
6097 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6098 } else {
6100 /* Maybe this mac address is in mta table, but it cannot be
6101 * deleted here because an entry of mta represents an address
6102 * range rather than a specific address. the delete action to
6103 * all entries will take effect in update_mta_status called by
6104 * hns3_nic_set_rx_mode.
6105 */
6106 status = 0;
6107 }
6109 return status;
6110 }
6113 enum HCLGE_MAC_ADDR_TYPE mac_type)
6115 struct hclge_vport_mac_addr_cfg *mac_cfg;
6116 struct list_head *list;
6118 if (!vport->vport_id)
6121 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6125 mac_cfg->hd_tbl_status = true;
6126 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6128 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6129 &vport->uc_mac_list : &vport->mc_mac_list;
6131 list_add_tail(&mac_cfg->node, list);
6134 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6135 bool is_write_tbl,
6136 enum HCLGE_MAC_ADDR_TYPE mac_type)
6137 {
6138 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6139 struct list_head *list;
6140 bool uc_flag, mc_flag;
6142 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6143 &vport->uc_mac_list : &vport->mc_mac_list;
6145 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6146 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6148 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6149 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6150 if (uc_flag && mac_cfg->hd_tbl_status)
6151 hclge_rm_uc_addr_common(vport, mac_addr);
6153 if (mc_flag && mac_cfg->hd_tbl_status)
6154 hclge_rm_mc_addr_common(vport, mac_addr);
6156 list_del(&mac_cfg->node);
6157 kfree(mac_cfg);
6158 break;
6159 }
6160 }
6161 }
6163 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6164 enum HCLGE_MAC_ADDR_TYPE mac_type)
6165 {
6166 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6167 struct list_head *list;
6169 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6170 &vport->uc_mac_list : &vport->mc_mac_list;
6172 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6173 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6174 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6176 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6177 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6179 mac_cfg->hd_tbl_status = false;
6180 if (is_del_list) {
6181 list_del(&mac_cfg->node);
6182 kfree(mac_cfg);
6183 }
6184 }
6185 }
6187 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6188 {
6189 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6190 struct hclge_vport *vport;
6191 int i;
6193 mutex_lock(&hdev->vport_cfg_mutex);
6194 for (i = 0; i < hdev->num_alloc_vport; i++) {
6195 vport = &hdev->vport[i];
6196 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6197 list_del(&mac->node);
6198 kfree(mac);
6199 }
6201 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6202 list_del(&mac->node);
6203 kfree(mac);
6204 }
6205 }
6206 mutex_unlock(&hdev->vport_cfg_mutex);
6207 }
6209 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6210 u16 cmdq_resp, u8 resp_code)
6211 {
6212 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6213 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6214 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6215 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6217 int return_status;
6219 if (cmdq_resp) {
6220 dev_err(&hdev->pdev->dev,
6221 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6222 cmdq_resp);
6223 return -EIO;
6224 }
6226 switch (resp_code) {
6227 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6228 case HCLGE_ETHERTYPE_ALREADY_ADD:
6229 return_status = 0;
6230 break;
6231 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6232 dev_err(&hdev->pdev->dev,
6233 "add mac ethertype failed for manager table overflow.\n");
6234 return_status = -EIO;
6235 break;
6236 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6237 dev_err(&hdev->pdev->dev,
6238 "add mac ethertype failed for key conflict.\n");
6239 return_status = -EIO;
6240 break;
6241 default:
6242 dev_err(&hdev->pdev->dev,
6243 "add mac ethertype failed for undefined, code=%d.\n",
6244 resp_code);
6245 return_status = -EIO;
6246 }
6248 return return_status;
6249 }
6251 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6252 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6253 {
6254 struct hclge_desc desc;
6255 u8 resp_code;
6256 u16 retval;
6257 int ret;
6259 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6260 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6262 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6263 if (ret) {
6264 dev_err(&hdev->pdev->dev,
6265 "add mac ethertype failed for cmd_send, ret =%d.\n",
6266 ret);
6267 return ret;
6268 }
6270 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6271 retval = le16_to_cpu(desc.retval);
6273 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6274 }
6276 static int init_mgr_tbl(struct hclge_dev *hdev)
6277 {
6278 int ret;
6279 int i;
6281 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6282 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6283 if (ret) {
6284 dev_err(&hdev->pdev->dev,
6285 "add mac ethertype failed, ret =%d.\n",
6286 ret);
6287 return ret;
6288 }
6289 }
6291 return 0;
6292 }
6294 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6295 {
6296 struct hclge_vport *vport = hclge_get_vport(handle);
6297 struct hclge_dev *hdev = vport->back;
6299 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6300 }
6302 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6303 bool is_first)
6304 {
6305 const unsigned char *new_addr = (const unsigned char *)p;
6306 struct hclge_vport *vport = hclge_get_vport(handle);
6307 struct hclge_dev *hdev = vport->back;
6308 int ret;
6310 /* mac addr check */
6311 if (is_zero_ether_addr(new_addr) ||
6312 is_broadcast_ether_addr(new_addr) ||
6313 is_multicast_ether_addr(new_addr)) {
6314 dev_err(&hdev->pdev->dev,
6315 "Change uc mac err! invalid mac:%pM.\n",
6316 new_addr);
6317 return -EINVAL;
6318 }
6320 if ((!is_first || is_kdump_kernel()) &&
6321 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6322 dev_warn(&hdev->pdev->dev,
6323 "remove old uc mac address fail.\n");
6325 ret = hclge_add_uc_addr(handle, new_addr);
6326 if (ret) {
6327 dev_err(&hdev->pdev->dev,
6328 "add uc mac address fail, ret =%d.\n",
6329 ret);
6331 if (!is_first &&
6332 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6333 dev_err(&hdev->pdev->dev,
6334 "restore uc mac address fail.\n");
6336 return -EIO;
6337 }
6339 ret = hclge_pause_addr_cfg(hdev, new_addr);
6340 if (ret) {
6341 dev_err(&hdev->pdev->dev,
6342 "configure mac pause address fail, ret =%d.\n",
6343 ret);
6344 return -EIO;
6345 }
6347 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6349 return 0;
6350 }
6352 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6353 int cmd)
6354 {
6355 struct hclge_vport *vport = hclge_get_vport(handle);
6356 struct hclge_dev *hdev = vport->back;
6358 if (!hdev->hw.mac.phydev)
6359 return -EOPNOTSUPP;
6361 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6362 }
6364 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6365 u8 fe_type, bool filter_en, u8 vf_id)
6366 {
6367 struct hclge_vlan_filter_ctrl_cmd *req;
6368 struct hclge_desc desc;
6369 int ret;
6371 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6373 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6374 req->vlan_type = vlan_type;
6375 req->vlan_fe = filter_en ? fe_type : 0;
6376 req->vf_id = vf_id;
6378 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6379 if (ret)
6380 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6381 ret);
6383 return ret;
6384 }
6386 #define HCLGE_FILTER_TYPE_VF 0
6387 #define HCLGE_FILTER_TYPE_PORT 1
6388 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6389 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6390 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6391 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6392 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6393 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6394 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6395 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6396 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
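/* On revision 0x21 hardware the VF and port VLAN filters are
 * controlled separately for the NIC and RoCE paths; revision 0x20
 * only has the single egress V1 knob.
 */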
6398 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6399 {
6400 struct hclge_vport *vport = hclge_get_vport(handle);
6401 struct hclge_dev *hdev = vport->back;
6403 if (hdev->pdev->revision >= 0x21) {
6404 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6405 HCLGE_FILTER_FE_EGRESS, enable, 0);
6406 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6407 HCLGE_FILTER_FE_INGRESS, enable, 0);
6408 } else {
6409 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6410 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6411 0);
6412 }
6413 if (enable)
6414 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6415 else
6416 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6417 }
6419 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6420 bool is_kill, u16 vlan, u8 qos,
6421 __be16 proto)
6422 {
6423 #define HCLGE_MAX_VF_BYTES 16
6424 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6425 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6426 struct hclge_desc desc[2];
6427 u8 vf_byte_val;
6428 u8 vf_byte_off;
6429 int ret;
6431 hclge_cmd_setup_basic_desc(&desc[0],
6432 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6433 hclge_cmd_setup_basic_desc(&desc[1],
6434 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6436 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6438 vf_byte_off = vfid / 8;
6439 vf_byte_val = 1 << (vfid % 8);
6441 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6442 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6444 req0->vlan_id = cpu_to_le16(vlan);
6445 req0->vlan_cfg = is_kill;
6447 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6448 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6449 else
6450 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6452 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6453 if (ret) {
6454 dev_err(&hdev->pdev->dev,
6455 "Send vf vlan command fail, ret =%d.\n",
6456 ret);
6457 return ret;
6458 }
6460 if (!is_kill) {
6461 #define HCLGE_VF_VLAN_NO_ENTRY 2
6462 if (!req0->resp_code || req0->resp_code == 1)
6463 return 0;
6465 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6466 dev_warn(&hdev->pdev->dev,
6467 "vf vlan table is full, vf vlan filter is disabled\n");
6468 return 0;
6469 }
6471 dev_err(&hdev->pdev->dev,
6472 "Add vf vlan filter fail, ret =%d.\n",
6473 req0->resp_code);
6474 } else {
6475 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6476 if (!req0->resp_code)
6477 return 0;
6479 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6480 dev_warn(&hdev->pdev->dev,
6481 "vlan %d filter is not in vf vlan table\n",
6482 vlan);
6483 return 0;
6484 }
6486 dev_err(&hdev->pdev->dev,
6487 "Kill vf vlan filter fail, ret =%d.\n",
6488 req0->resp_code);
6489 }
6491 return -EIO;
6492 }
6494 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6495 u16 vlan_id, bool is_kill)
6496 {
6497 struct hclge_vlan_filter_pf_cfg_cmd *req;
6498 struct hclge_desc desc;
6499 u8 vlan_offset_byte_val;
6500 u8 vlan_offset_byte;
6501 u8 vlan_offset_160;
6502 int ret;
6504 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6506 vlan_offset_160 = vlan_id / 160;
6507 vlan_offset_byte = (vlan_id % 160) / 8;
6508 vlan_offset_byte_val = 1 << (vlan_id % 8);
6510 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6511 req->vlan_offset = vlan_offset_160;
6512 req->vlan_cfg = is_kill;
6513 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6515 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6516 if (ret)
6517 dev_err(&hdev->pdev->dev,
6518 "port vlan command, send fail, ret =%d.\n", ret);
6519 return ret;
6520 }
6522 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6523 u16 vport_id, u16 vlan_id, u8 qos,
6524 bool is_kill)
6525 {
6526 u16 vport_idx, vport_num = 0;
6527 int ret;
6529 if (is_kill && !vlan_id)
6530 return 0;
6532 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6533 qos, proto);
6534 if (ret) {
6535 dev_err(&hdev->pdev->dev,
6536 "Set %d vport vlan filter config fail, ret =%d.\n",
6537 vport_id, ret);
6538 return ret;
6539 }
6541 /* vlan 0 may be added twice when 8021q module is enabled */
6542 if (!is_kill && !vlan_id &&
6543 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6544 return 0;
6546 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6547 dev_err(&hdev->pdev->dev,
6548 "Add port vlan failed, vport %d is already in vlan %d\n",
6549 vport_id, vlan_id);
6550 return -EINVAL;
6551 }
6553 if (is_kill &&
6554 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6555 dev_err(&hdev->pdev->dev,
6556 "Delete port vlan failed, vport %d is not in vlan %d\n",
6557 vport_id, vlan_id);
6558 return -EINVAL;
6559 }
6561 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6562 vport_num++;
6564 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6565 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6566 is_kill);
6568 return ret;
6569 }
6571 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6572 u16 vlan_id, bool is_kill)
6573 {
6574 struct hclge_vport *vport = hclge_get_vport(handle);
6575 struct hclge_dev *hdev = vport->back;
6577 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6578 0, is_kill);
6579 }
6581 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6582 u16 vlan, u8 qos, __be16 proto)
6583 {
6584 struct hclge_vport *vport = hclge_get_vport(handle);
6585 struct hclge_dev *hdev = vport->back;
6587 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6588 return -EINVAL;
6589 if (proto != htons(ETH_P_8021Q))
6590 return -EPROTONOSUPPORT;
6592 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6593 }
6595 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6596 {
6597 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6598 struct hclge_vport_vtag_tx_cfg_cmd *req;
6599 struct hclge_dev *hdev = vport->back;
6600 struct hclge_desc desc;
6601 int status;
6603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6605 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6606 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6607 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6608 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6609 vcfg->accept_tag1 ? 1 : 0);
6610 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6611 vcfg->accept_untag1 ? 1 : 0);
6612 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6613 vcfg->accept_tag2 ? 1 : 0);
6614 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6615 vcfg->accept_untag2 ? 1 : 0);
6616 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6617 vcfg->insert_tag1_en ? 1 : 0);
6618 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6619 vcfg->insert_tag2_en ? 1 : 0);
6620 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6622 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6623 req->vf_bitmap[req->vf_offset] =
6624 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6626 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6627 if (status)
6628 dev_err(&hdev->pdev->dev,
6629 "Send port txvlan cfg command fail, ret =%d\n",
6630 status);
6632 return status;
6633 }
6635 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6636 {
6637 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6638 struct hclge_vport_vtag_rx_cfg_cmd *req;
6639 struct hclge_dev *hdev = vport->back;
6640 struct hclge_desc desc;
6641 int status;
6643 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6645 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6646 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6647 vcfg->strip_tag1_en ? 1 : 0);
6648 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6649 vcfg->strip_tag2_en ? 1 : 0);
6650 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6651 vcfg->vlan1_vlan_prionly ? 1 : 0);
6652 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6653 vcfg->vlan2_vlan_prionly ? 1 : 0);
6655 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6656 req->vf_bitmap[req->vf_offset] =
6657 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6659 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6660 if (status)
6661 dev_err(&hdev->pdev->dev,
6662 "Send port rxvlan cfg command fail, ret =%d\n",
6663 status);
6665 return status;
6666 }
6668 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6669 {
6670 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6671 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6672 struct hclge_desc desc;
6673 int status;
6675 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6676 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6677 rx_req->ot_fst_vlan_type =
6678 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6679 rx_req->ot_sec_vlan_type =
6680 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6681 rx_req->in_fst_vlan_type =
6682 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6683 rx_req->in_sec_vlan_type =
6684 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6686 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6687 if (status) {
6688 dev_err(&hdev->pdev->dev,
6689 "Send rxvlan protocol type command fail, ret =%d\n",
6690 status);
6691 return status;
6692 }
6694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6696 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6697 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6698 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6700 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6701 if (status)
6702 dev_err(&hdev->pdev->dev,
6703 "Send txvlan protocol type command fail, ret =%d\n",
6704 status);
6706 return status;
6707 }
6709 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6710 {
6711 #define HCLGE_DEF_VLAN_TYPE 0x8100
6713 struct hnae3_handle *handle = &hdev->vport[0].nic;
6714 struct hclge_vport *vport;
6715 int ret;
6716 int i;
6718 if (hdev->pdev->revision >= 0x21) {
6719 /* for revision 0x21, vf vlan filter is per function */
6720 for (i = 0; i < hdev->num_alloc_vport; i++) {
6721 vport = &hdev->vport[i];
6722 ret = hclge_set_vlan_filter_ctrl(hdev,
6723 HCLGE_FILTER_TYPE_VF,
6724 HCLGE_FILTER_FE_EGRESS,
6725 true,
6726 vport->vport_id);
6727 if (ret)
6728 return ret;
6729 }
6731 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6732 HCLGE_FILTER_FE_INGRESS, true,
6733 0);
6734 if (ret)
6735 return ret;
6736 } else {
6737 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6738 HCLGE_FILTER_FE_EGRESS_V1_B,
6739 true, 0);
6740 if (ret)
6741 return ret;
6742 }
6744 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6746 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6747 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6748 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6749 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6750 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6751 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6753 ret = hclge_set_vlan_protocol_type(hdev);
6757 for (i = 0; i < hdev->num_alloc_vport; i++) {
6758 vport = &hdev->vport[i];
6759 vport->txvlan_cfg.accept_tag1 = true;
6760 vport->txvlan_cfg.accept_untag1 = true;
6762 /* accept_tag2 and accept_untag2 are not supported on
6763 * pdev revision(0x20), new revision support them. The
6764 * value of this two fields will not return error when driver
6765 * send command to fireware in revision(0x20).
6766 * This two fields can not configured by user.
6768 vport->txvlan_cfg.accept_tag2 = true;
6769 vport->txvlan_cfg.accept_untag2 = true;
6771 vport->txvlan_cfg.insert_tag1_en = false;
6772 vport->txvlan_cfg.insert_tag2_en = false;
6773 vport->txvlan_cfg.default_tag1 = 0;
6774 vport->txvlan_cfg.default_tag2 = 0;
6776 ret = hclge_set_vlan_tx_offload_cfg(vport);
6780 vport->rxvlan_cfg.strip_tag1_en = false;
6781 vport->rxvlan_cfg.strip_tag2_en = true;
6782 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6783 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6785 ret = hclge_set_vlan_rx_offload_cfg(vport);
6790 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
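/* The helpers below maintain a per-vport software list mirroring the VLAN
 * IDs programmed into the hardware filter, so that the table can be replayed
 * or torn down around resets.
 */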
void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = true;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
			       bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

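/* Convert the requested MTU to a maximum frame size (MTU + Ethernet header +
 * FCS + two VLAN tags). A VF request only updates its software bound, while
 * the PF path reprograms the MAC and reallocates packet buffers under
 * vport_lock.
 */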
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

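/* TQP reset handshake: assert the per-queue reset request, poll the ready
 * bit until firmware reports completion, then deassert the request.
 */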
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

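/* Disable the queue, then run the assert/poll/deassert handshake above on
 * its global queue id.
 */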
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

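/* Record the requested Rx/Tx pause combination and program the MAC pause
 * registers, unless PFC currently owns flow control.
 */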
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}

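/* Resolve the pause mode negotiated by the attached PHY from the local and
 * link-partner advertisements; half-duplex links get pause disabled.
 */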
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

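/* Bind a KNIC/UNIC/RoCE client to each vport and run its init_instance hook.
 * The RoCE instance is only initialized once both the nic client and RoCE
 * support are present.
 */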
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

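/* Standard PCI bring-up: enable the device, fall back from a 64-bit to a
 * 32-bit DMA mask if needed, claim the regions and map BAR2 as the register
 * space.
 */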
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

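/* FLR handling: request a function reset and wait up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS for the reset service task to take
 * the device down before the PCI core performs the FLR.
 */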
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

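/* Main PF probe path. Ordering matters: the command queue must be up before
 * capabilities and configuration are queried, vectors before TQP/vport
 * setup, and the MDIO bus before MAC init may touch an attached PHY.
 */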
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

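/* Re-initialize hardware state after a reset. Unlike the probe path, the
 * software structures already exist, so only hardware-facing configuration
 * is replayed here.
 */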
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n",
			ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

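/* Change the number of queues used for RSS: update the TM vport mapping,
 * recompute the RSS TC mode and, unless the user configured a custom
 * indirection table, respread the table over the new rss_size.
 */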
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

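/* Register dump layout: the cmdq/common/ring/tqp-intr register blocks are
 * emitted REG_NUM_PER_LINE values per line and padded with SEPARATOR_VALUE,
 * followed by the 32-bit and 64-bit register sets queried from firmware.
 */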
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);