// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_err.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
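/* HCLGE_MAC_STATS_FIELD_OFF() records where a counter lives inside
 * struct hclge_mac_stats; HCLGE_STATS_READ() later fetches the u64 at
 * that byte offset, which keeps the stats table below purely data-driven.
 */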

#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, },
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

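/* The 40-byte key below is the canonical Toeplitz hash key that many
 * drivers ship as their default RSS key.
 */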
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

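		/* the '+=' below is deliberate: each query appears to return
		 * the delta since the previous read (read-clear counters),
		 * so totals are accumulated in hdev->hw_stats
		 */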
		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

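	/* Each descriptor appears to carry four 64-bit counters, except the
	 * first, which also holds the response head and thus only three;
	 * i.e. desc_num = 1 + ceil((reg_num - 3) / 4), as computed below.
	 */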
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

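	/* HCLGE_STATE_STATISTICS_UPDATING serializes updaters: if another
	 * context is already refreshing the stats, skip this run instead
	 * of blocking on it.
	 */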
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all MAC modes (GE/XGE/LGE/CGE) support it
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

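	/* param[2] holds the low 32 bits of the MAC address, param[3] its
	 * high 16 bits; the "<< 31 << 1" below is simply a 32-bit shift,
	 * presumably split in two to avoid shift-count warnings.
	 */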
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length must be in units of 4 bytes when sent to
		 * the hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
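		/* req->offset now packs both the byte offset of this chunk
		 * and its read length (in 4-byte units) into a single le32
		 */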
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently, non-contiguous TCs are not supported */
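	/* hw_tc_map gets bit i set per enabled TC, e.g. num_tc = 4 -> 0x0F */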
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

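	/* the min and max MSS words appear to share one in-word layout,
	 * which is why the _MIN_ mask/shift is reused for the max below
	 */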
	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
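	/* rss_size is capped by the hardware limit and by an even split of
	 * this vport's queues across its TCs
	 */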
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
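	/* the main (PF) vport also absorbs the remainder: e.g. 16 TQPs on
	 * 5 vports gives tqp_per_vport = 3 and tqp_main_vport = 3 + 1 = 4
	 */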
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* Tx buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = pfc_enable_num * aligned_mps +
			(tc_num - pfc_enable_num) * aligned_mps / 2 +
			aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low =
			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < hdev->tx_buf_size)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = hdev->tx_buf_size;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high =
					roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
				priv->buf_size = priv->wl.high +
						 hdev->dv_buf_size;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high +
						 hdev->dv_buf_size;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 256;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at a time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at a time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
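	/* vector layout: NIC vectors occupy [0, roce_base_msix_offset),
	 * RoCE vectors start right after them (see hclge_query_pf_resource)
	 */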
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M can be half duplex */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

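	/* firmware speed codes used below: 0 = 1G, 1 = 10G, 2 = 25G,
	 * 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M
	 */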
	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_speed_cmd *resp = NULL;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
	resp = (struct hclge_sfp_speed_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = resp->sfp_speed;

	return 0;
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u32 speed;
	int ret;

	/* get the speed from the SFP cmd when no PHY exists */
	if (mac.phydev)
		return 0;

	/* if IMP does not support get SFP/qSFP speed, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	ret = hclge_get_sfp_speed(hdev, &speed);
	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
		return 0; /* do nothing if no SFP */

	/* must config full duplex for SFP */
	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}


static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since we will not have
	 * cleared the RX CMDQ event this time, we will receive another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}
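
/* The checks above give vector0 demultiplexing a fixed priority: reset
 * sources (IMP, then global, then core) win over MSI-X error events,
 * which in turn win over mailbox (CMDQ RX) events. Losing sources stay
 * asserted in hardware and re-raise the interrupt on a later pass.
 */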

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.*/
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}
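
/* For reset events the misc vector deliberately stays masked here; it is
 * re-enabled from hclge_clear_reset_cause() (or from the reset-level
 * resolution path for MSI-X errors) only after the cause register has
 * been cleared, which avoids re-taking the interrupt while the reset is
 * still pending.
 */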

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WATI_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
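
/* With the defaults above the wait loop polls every 100 ms for at most
 * 200 iterations, i.e. a hardware reset is given roughly 20 seconds to
 * complete before -EBUSY is returned to the caller.
 */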

static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset)
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced a
		 * new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
		rst_level = HNAE3_CORE_RESET;
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
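
/* Note that the priority resolution above also clears every request bit
 * of lower priority: e.g. a pending IMP reset absorbs global/core/func
 * requests, since the wider reset already covers everything the narrower
 * ones would do.
 */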

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
#define RESET_UPGRADE_DELAY_SEC 10

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		   BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		if (is_timeout) {
			set_bit(hdev->reset_type, &hdev->reset_pending);
			dev_info(&hdev->pdev->dev,
				 "re-schedule to wait for hw reset done\n");
			return true;
		}

		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
		hclge_clear_reset_cause(hdev);
		mod_timer(&hdev->reset_timer,
			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);

		return false;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}
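
/* Retry policy in short: a timed-out hardware reset is re-queued as-is,
 * while other failures re-arm the reset timer so that the next attempt,
 * RESET_UPGRADE_DELAY_SEC later, escalates to a global reset (see
 * hclge_reset_timer() below). After MAX_RESET_FAIL_CNT consecutive
 * failures the reset is abandoned.
 */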

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	bool is_timeout = false;
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev)) {
		is_timeout = true;
		goto err_reset;
	}

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev, is_timeout))
		hclge_reset_task_schedule(hdev);
}
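
/* The sequence above follows the client notify protocol: DOWN ->
 * (hardware reset) -> UNINIT -> re-init of the ae_dev -> INIT ->
 * RESTORE -> UP, with the RoCE client notified outside the NIC client
 * transitions and rtnl_lock held only across the NIC client calls.
 */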

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ).
	 * In case of a new request we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one then we
	 * want to make sure we throttle the reset request. Therefore, we will
	 * not allow it again before 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(hdev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}
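
/* Example of the throttling above: a request arriving within 3 seconds
 * (3 * HZ jiffies) of the last reset is dropped, while one arriving more
 * than 20 seconds later (4 * the 5-second watchdog period) is treated as
 * new and restarts escalation from a plain function (PF) reset.
 */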

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering global reset in reset timer\n");
	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset, then we can proceed with driver and client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1; the PF (vport 0) is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_service_complete(hdev);
}
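
/* hclge_service_timer() re-arms itself every second, so the work above
 * runs at roughly 1 Hz; MAC statistics are refreshed only once every
 * HCLGE_STATS_TIMER_INTERVAL ticks of that clock.
 */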

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
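
/* Key layout sketch, assuming the current definitions (40-byte RSS key,
 * HCLGE_RSS_HASH_KEY_NUM = 16 key bytes per descriptor): the key is
 * pushed in three commands carrying 16 + 16 + 8 bytes, selected by the
 * key_offset field written into hash_config above.
 */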

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
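
/* Mapping used above, ethtool -> hardware tuple bits:
 *   RXH_IP_SRC   -> HCLGE_S_IP_BIT      RXH_IP_DST   -> HCLGE_D_IP_BIT
 *   RXH_L4_B_0_1 -> HCLGE_S_PORT_BIT    RXH_L4_B_2_3 -> HCLGE_D_PORT_BIT
 * SCTP flows additionally hash the verification tag (HCLGE_V_TAG_BIT).
 */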

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of rss_size rounded up to a power of two; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
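
/* Worked example for the TC sizing above: alloc_rss_size = 6 rounds up
 * to 8, so tc_size = ilog2(8) = 3 is programmed for each enabled TC and
 * each TC's queue region starts at tc_offset = 6 * tc_index; the
 * indirection table written earlier keeps the spread within rss_size.
 */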

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
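
/* A ring chain longer than HCLGE_VECTOR_ELEMENTS_PER_CMD entries is
 * split across several commands above: each full descriptor is sent
 * immediately and then re-initialised for the next batch, with any
 * remainder flushed by the trailing "i > 0" block.
 */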

static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision(0x20); newer revisions support them. Setting
	 * these two fields does not cause an error when the driver sends
	 * the command to firmware on revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.fd_en = true;
	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
			BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		calc_y(tmp_y_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
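
/* The meta data word is built from bit 0 upwards and then shifted left
 * so that it sits flush against the MSB of the 32-bit key region,
 * matching the "meta data at MSB, tuple key at LSB" layout described
 * before hclge_config_key() below.
 */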

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0 ; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}

static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	return rule && rule->location == location;
}
4521 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4522 struct hclge_fd_rule *new_rule,
4526 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4527 struct hlist_node *node2;
4529 if (is_add && !new_rule)
4532 hlist_for_each_entry_safe(rule, node2,
4533 &hdev->fd_rule_list, rule_node) {
4534 if (rule->location >= location)
4539 if (rule && rule->location == location) {
4540 hlist_del(&rule->rule_node);
4542 hdev->hclge_fd_rule_num--;
4547 } else if (!is_add) {
4548 dev_err(&hdev->pdev->dev,
4549 "delete fail, rule %d is inexistent\n",
4554 INIT_HLIST_NODE(&new_rule->rule_node);
4557 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4559 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4561 hdev->hclge_fd_rule_num++;
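/* The fd rule list is kept sorted by location: the loop above stops at
 * the first rule whose location is >= the new one, an existing rule at
 * that exact location is removed first, and the new node is linked
 * behind its predecessor (or at the list head when there is none).
 */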
4566 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4567 struct ethtool_rx_flow_spec *fs,
4568 struct hclge_fd_rule *rule)
4570 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4572 switch (flow_type) {
4576 rule->tuples.src_ip[3] =
4577 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4578 rule->tuples_mask.src_ip[3] =
4579 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4581 rule->tuples.dst_ip[3] =
4582 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4583 rule->tuples_mask.dst_ip[3] =
4584 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
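/* src_ip/dst_ip are 4-word arrays sized for IPv6; an IPv4 address
 * occupies only the last word (index 3), mirroring the word order
 * that be32_to_cpu_array() produces for the IPv6 cases below.
 */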
4586 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4587 rule->tuples_mask.src_port =
4588 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4590 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4591 rule->tuples_mask.dst_port =
4592 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4594 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4595 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4597 rule->tuples.ether_proto = ETH_P_IP;
4598 rule->tuples_mask.ether_proto = 0xFFFF;
4602 rule->tuples.src_ip[3] =
4603 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4604 rule->tuples_mask.src_ip[3] =
4605 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4607 rule->tuples.dst_ip[3] =
4608 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4609 rule->tuples_mask.dst_ip[3] =
4610 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4612 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4613 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4615 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4616 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4618 rule->tuples.ether_proto = ETH_P_IP;
4619 rule->tuples_mask.ether_proto = 0xFFFF;
4625 be32_to_cpu_array(rule->tuples.src_ip,
4626 fs->h_u.tcp_ip6_spec.ip6src, 4);
4627 be32_to_cpu_array(rule->tuples_mask.src_ip,
4628 fs->m_u.tcp_ip6_spec.ip6src, 4);
4630 be32_to_cpu_array(rule->tuples.dst_ip,
4631 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4632 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4633 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4635 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4636 rule->tuples_mask.src_port =
4637 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4639 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4640 rule->tuples_mask.dst_port =
4641 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4643 rule->tuples.ether_proto = ETH_P_IPV6;
4644 rule->tuples_mask.ether_proto = 0xFFFF;
4647 case IPV6_USER_FLOW:
4648 be32_to_cpu_array(rule->tuples.src_ip,
4649 fs->h_u.usr_ip6_spec.ip6src, 4);
4650 be32_to_cpu_array(rule->tuples_mask.src_ip,
4651 fs->m_u.usr_ip6_spec.ip6src, 4);
4653 be32_to_cpu_array(rule->tuples.dst_ip,
4654 fs->h_u.usr_ip6_spec.ip6dst, 4);
4655 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4656 fs->m_u.usr_ip6_spec.ip6dst, 4);
4658 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4659 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4661 rule->tuples.ether_proto = ETH_P_IPV6;
4662 rule->tuples_mask.ether_proto = 0xFFFF;
4666 ether_addr_copy(rule->tuples.src_mac,
4667 fs->h_u.ether_spec.h_source);
4668 ether_addr_copy(rule->tuples_mask.src_mac,
4669 fs->m_u.ether_spec.h_source);
4671 ether_addr_copy(rule->tuples.dst_mac,
4672 fs->h_u.ether_spec.h_dest);
4673 ether_addr_copy(rule->tuples_mask.dst_mac,
4674 fs->m_u.ether_spec.h_dest);
4676 rule->tuples.ether_proto =
4677 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4678 rule->tuples_mask.ether_proto =
4679 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4686 switch (flow_type) {
4689 rule->tuples.ip_proto = IPPROTO_SCTP;
4690 rule->tuples_mask.ip_proto = 0xFF;
4694 rule->tuples.ip_proto = IPPROTO_TCP;
4695 rule->tuples_mask.ip_proto = 0xFF;
4699 rule->tuples.ip_proto = IPPROTO_UDP;
4700 rule->tuples_mask.ip_proto = 0xFF;
4706 if ((fs->flow_type & FLOW_EXT)) {
4707 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4708 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4711 if (fs->flow_type & FLOW_MAC_EXT) {
4712 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4713 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4719 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4720 struct ethtool_rxnfc *cmd)
4722 struct hclge_vport *vport = hclge_get_vport(handle);
4723 struct hclge_dev *hdev = vport->back;
4724 u16 dst_vport_id = 0, q_index = 0;
4725 struct ethtool_rx_flow_spec *fs;
4726 struct hclge_fd_rule *rule;
4731 if (!hnae3_dev_fd_supported(hdev))
4734 if (!hdev->fd_cfg.fd_en) {
4735 dev_warn(&hdev->pdev->dev,
4736 "Please enable flow director first\n");
4740 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4742 ret = hclge_fd_check_spec(hdev, fs, &unused);
4744 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4748 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4749 action = HCLGE_FD_ACTION_DROP_PACKET;
4751 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4752 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4755 if (vf > hdev->num_req_vfs) {
4756 dev_err(&hdev->pdev->dev,
4757 "Error: vf id (%d) > max vf num (%d)\n",
4758 vf, hdev->num_req_vfs);
4762 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4763 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4766 dev_err(&hdev->pdev->dev,
4767 "Error: queue id (%d) > max tqp num (%d)\n",
4772 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4776 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4780 ret = hclge_fd_get_tuple(hdev, fs, rule);
4784 rule->flow_type = fs->flow_type;
4786 rule->location = fs->location;
4787 rule->unused_tuple = unused;
4788 rule->vf_id = dst_vport_id;
4789 rule->queue_id = q_index;
4790 rule->action = action;
4792 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4796 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4800 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4811 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4812 struct ethtool_rxnfc *cmd)
4814 struct hclge_vport *vport = hclge_get_vport(handle);
4815 struct hclge_dev *hdev = vport->back;
4816 struct ethtool_rx_flow_spec *fs;
4819 if (!hnae3_dev_fd_supported(hdev))
4822 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4824 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4827 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4828 dev_err(&hdev->pdev->dev,
4829 "Delete fail, rule %d is inexistent\n",
4834 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4835 fs->location, NULL, false);
4839 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4843 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4846 struct hclge_vport *vport = hclge_get_vport(handle);
4847 struct hclge_dev *hdev = vport->back;
4848 struct hclge_fd_rule *rule;
4849 struct hlist_node *node;
4851 if (!hnae3_dev_fd_supported(hdev))
4855 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4857 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4858 rule->location, NULL, false);
4859 hlist_del(&rule->rule_node);
4861 hdev->hclge_fd_rule_num--;
4864 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4866 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4867 rule->location, NULL, false);
4871 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4873 struct hclge_vport *vport = hclge_get_vport(handle);
4874 struct hclge_dev *hdev = vport->back;
4875 struct hclge_fd_rule *rule;
4876 struct hlist_node *node;
4879 /* Return ok here, because reset error handling will check this
4880 * return value. If error is returned here, the reset process will
4881 * fail.
4882 */
4883 if (!hnae3_dev_fd_supported(hdev))
4886 /* if fd is disabled, it should not be restored during reset */
4887 if (!hdev->fd_cfg.fd_en)
4890 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4891 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4893 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4896 dev_warn(&hdev->pdev->dev,
4897 "Restore rule %d failed, remove it\n",
4899 hlist_del(&rule->rule_node);
4901 hdev->hclge_fd_rule_num--;
4907 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4908 struct ethtool_rxnfc *cmd)
4910 struct hclge_vport *vport = hclge_get_vport(handle);
4911 struct hclge_dev *hdev = vport->back;
4913 if (!hnae3_dev_fd_supported(hdev))
4916 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4917 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4922 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4923 struct ethtool_rxnfc *cmd)
4925 struct hclge_vport *vport = hclge_get_vport(handle);
4926 struct hclge_fd_rule *rule = NULL;
4927 struct hclge_dev *hdev = vport->back;
4928 struct ethtool_rx_flow_spec *fs;
4929 struct hlist_node *node2;
4931 if (!hnae3_dev_fd_supported(hdev))
4934 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4936 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4937 if (rule->location >= fs->location)
4941 if (!rule || fs->location != rule->location)
4944 fs->flow_type = rule->flow_type;
4945 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4949 fs->h_u.tcp_ip4_spec.ip4src =
4950 cpu_to_be32(rule->tuples.src_ip[3]);
4951 fs->m_u.tcp_ip4_spec.ip4src =
4952 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4953 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4955 fs->h_u.tcp_ip4_spec.ip4dst =
4956 cpu_to_be32(rule->tuples.dst_ip[3]);
4957 fs->m_u.tcp_ip4_spec.ip4dst =
4958 rule->unused_tuple & BIT(INNER_DST_IP) ?
4959 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4961 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4962 fs->m_u.tcp_ip4_spec.psrc =
4963 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4964 0 : cpu_to_be16(rule->tuples_mask.src_port);
4966 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4967 fs->m_u.tcp_ip4_spec.pdst =
4968 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4969 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4971 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4972 fs->m_u.tcp_ip4_spec.tos =
4973 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4974 0 : rule->tuples_mask.ip_tos;
4978 fs->h_u.usr_ip4_spec.ip4src =
4979 cpu_to_be32(rule->tuples.src_ip[3]);
4980 fs->m_u.usr_ip4_spec.ip4src =
4981 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4982 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4984 fs->h_u.usr_ip4_spec.ip4dst =
4985 cpu_to_be32(rule->tuples.dst_ip[3]);
4986 fs->m_u.usr_ip4_spec.ip4dst =
4987 rule->unused_tuple & BIT(INNER_DST_IP) ?
4988 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4990 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4991 fs->m_u.usr_ip4_spec.tos =
4992 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4993 0 : rule->tuples_mask.ip_tos;
4995 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4996 fs->m_u.usr_ip4_spec.proto =
4997 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4998 0 : rule->tuples_mask.ip_proto;
5000 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5006 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5007 rule->tuples.src_ip, 4);
5008 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5009 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5011 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5012 rule->tuples_mask.src_ip, 4);
5014 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5015 rule->tuples.dst_ip, 4);
5016 if (rule->unused_tuple & BIT(INNER_DST_IP))
5017 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5019 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5020 rule->tuples_mask.dst_ip, 4);
5022 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5023 fs->m_u.tcp_ip6_spec.psrc =
5024 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5025 0 : cpu_to_be16(rule->tuples_mask.src_port);
5027 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5028 fs->m_u.tcp_ip6_spec.pdst =
5029 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5030 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5033 case IPV6_USER_FLOW:
5034 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5035 rule->tuples.src_ip, 4);
5036 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5037 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5039 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5040 rule->tuples_mask.src_ip, 4);
5042 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5043 rule->tuples.dst_ip, 4);
5044 if (rule->unused_tuple & BIT(INNER_DST_IP))
5045 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5047 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5048 rule->tuples_mask.dst_ip, 4);
5050 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5051 fs->m_u.usr_ip6_spec.l4_proto =
5052 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5053 0 : rule->tuples_mask.ip_proto;
5057 ether_addr_copy(fs->h_u.ether_spec.h_source,
5058 rule->tuples.src_mac);
5059 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5060 eth_zero_addr(fs->m_u.ether_spec.h_source);
5062 ether_addr_copy(fs->m_u.ether_spec.h_source,
5063 rule->tuples_mask.src_mac);
5065 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5066 rule->tuples.dst_mac);
5067 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5068 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5070 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5071 rule->tuples_mask.dst_mac);
5073 fs->h_u.ether_spec.h_proto =
5074 cpu_to_be16(rule->tuples.ether_proto);
5075 fs->m_u.ether_spec.h_proto =
5076 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5077 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5084 if (fs->flow_type & FLOW_EXT) {
5085 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5086 fs->m_ext.vlan_tci =
5087 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5088 cpu_to_be16(VLAN_VID_MASK) :
5089 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5092 if (fs->flow_type & FLOW_MAC_EXT) {
5093 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5094 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5095 eth_zero_addr(fs->m_ext.h_dest);
5096 else
5097 ether_addr_copy(fs->m_ext.h_dest,
5098 rule->tuples_mask.dst_mac);
5101 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5102 fs->ring_cookie = RX_CLS_FLOW_DISC;
5106 fs->ring_cookie = rule->queue_id;
5107 vf_id = rule->vf_id;
5108 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5109 fs->ring_cookie |= vf_id;
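/* Example encoding (hypothetical values): queue 5 on VF 2 yields
 * ring_cookie = 5 | (2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF), i.e.
 * the VF index is stored above the 32-bit queue index in the cookie.
 */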
5115 static int hclge_get_all_rules(struct hnae3_handle *handle,
5116 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5118 struct hclge_vport *vport = hclge_get_vport(handle);
5119 struct hclge_dev *hdev = vport->back;
5120 struct hclge_fd_rule *rule;
5121 struct hlist_node *node2;
5124 if (!hnae3_dev_fd_supported(hdev))
5127 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5129 hlist_for_each_entry_safe(rule, node2,
5130 &hdev->fd_rule_list, rule_node) {
5131 if (cnt == cmd->rule_cnt)
5134 rule_locs[cnt] = rule->location;
5138 cmd->rule_cnt = cnt;
5143 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5145 struct hclge_vport *vport = hclge_get_vport(handle);
5146 struct hclge_dev *hdev = vport->back;
5148 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5149 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5152 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5154 struct hclge_vport *vport = hclge_get_vport(handle);
5155 struct hclge_dev *hdev = vport->back;
5157 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5160 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5162 struct hclge_vport *vport = hclge_get_vport(handle);
5163 struct hclge_dev *hdev = vport->back;
5165 return hdev->reset_count;
5168 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5170 struct hclge_vport *vport = hclge_get_vport(handle);
5171 struct hclge_dev *hdev = vport->back;
5173 hdev->fd_cfg.fd_en = enable;
5174 if (!enable)
5175 hclge_del_all_fd_entries(handle, false);
5176 else
5177 hclge_restore_fd_entries(handle);
5180 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5182 struct hclge_desc desc;
5183 struct hclge_config_mac_mode_cmd *req =
5184 (struct hclge_config_mac_mode_cmd *)desc.data;
5188 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5189 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5190 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5191 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5192 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5193 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5194 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5195 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5196 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5197 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5198 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5199 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5200 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5201 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5202 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5203 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5205 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5207 dev_err(&hdev->pdev->dev,
5208 "mac enable fail, ret =%d.\n", ret);
5211 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5213 struct hclge_config_mac_mode_cmd *req;
5214 struct hclge_desc desc;
5218 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5219 /* 1 Read out the MAC mode config first */
5220 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5223 dev_err(&hdev->pdev->dev,
5224 "mac loopback get fail, ret =%d.\n", ret);
5228 /* 2 Then set up the loopback flag */
5229 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5230 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5231 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5232 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5234 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5236 /* 3 Config mac work mode with loopback flag
5237 * and its original configuration parameters
5238 */
5239 hclge_cmd_reuse_desc(&desc, false);
5240 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5242 dev_err(&hdev->pdev->dev,
5243 "mac loopback set fail, ret =%d.\n", ret);
5247 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5248 enum hnae3_loop loop_mode)
5250 #define HCLGE_SERDES_RETRY_MS 10
5251 #define HCLGE_SERDES_RETRY_NUM 100
5253 #define HCLGE_MAC_LINK_STATUS_MS 20
5254 #define HCLGE_MAC_LINK_STATUS_NUM 10
5255 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5256 #define HCLGE_MAC_LINK_STATUS_UP 1
5258 struct hclge_serdes_lb_cmd *req;
5259 struct hclge_desc desc;
5260 int mac_link_ret = 0;
5264 req = (struct hclge_serdes_lb_cmd *)desc.data;
5265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5267 switch (loop_mode) {
5268 case HNAE3_LOOP_SERIAL_SERDES:
5269 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5271 case HNAE3_LOOP_PARALLEL_SERDES:
5272 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5275 dev_err(&hdev->pdev->dev,
5276 "unsupported serdes loopback mode %d\n", loop_mode);
5281 req->enable = loop_mode_b;
5282 req->mask = loop_mode_b;
5283 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5285 req->mask = loop_mode_b;
5286 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5289 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5291 dev_err(&hdev->pdev->dev,
5292 "serdes loopback set fail, ret = %d\n", ret);
5297 msleep(HCLGE_SERDES_RETRY_MS);
5298 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5300 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5302 dev_err(&hdev->pdev->dev,
5303 "serdes loopback get, ret = %d\n", ret);
5306 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5307 !(req->result & HCLGE_CMD_SERDES_DONE_B));
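/* The do/while above polls the firmware up to HCLGE_SERDES_RETRY_NUM
 * (100) times at HCLGE_SERDES_RETRY_MS (10 ms) intervals, so the
 * loopback configuration is given roughly one second to complete.
 */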
5309 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5310 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5312 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5313 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5317 hclge_cfg_mac_mode(hdev, en);
5321 /* serdes internal loopback, independent of the network cable. */
5322 msleep(HCLGE_MAC_LINK_STATUS_MS);
5323 ret = hclge_get_mac_link_status(hdev);
5324 if (ret == mac_link_ret)
5326 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
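/* Likewise, the link state is polled up to HCLGE_MAC_LINK_STATUS_NUM
 * (10) times at HCLGE_MAC_LINK_STATUS_MS (20 ms) intervals, waiting at
 * most about 200 ms for the MAC link to match the loopback setting.
 */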
5328 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5333 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5334 int stream_id, bool enable)
5336 struct hclge_desc desc;
5337 struct hclge_cfg_com_tqp_queue_cmd *req =
5338 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5341 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5342 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5343 req->stream_id = cpu_to_le16(stream_id);
5344 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5346 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5348 dev_err(&hdev->pdev->dev,
5349 "Tqp enable fail, status =%d.\n", ret);
5353 static int hclge_set_loopback(struct hnae3_handle *handle,
5354 enum hnae3_loop loop_mode, bool en)
5356 struct hclge_vport *vport = hclge_get_vport(handle);
5357 struct hnae3_knic_private_info *kinfo;
5358 struct hclge_dev *hdev = vport->back;
5361 switch (loop_mode) {
5362 case HNAE3_LOOP_APP:
5363 ret = hclge_set_app_loopback(hdev, en);
5365 case HNAE3_LOOP_SERIAL_SERDES:
5366 case HNAE3_LOOP_PARALLEL_SERDES:
5367 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5371 dev_err(&hdev->pdev->dev,
5372 "loop_mode %d is not supported\n", loop_mode);
5379 kinfo = &vport->nic.kinfo;
5380 for (i = 0; i < kinfo->num_tqps; i++) {
5381 ret = hclge_tqp_enable(hdev, i, 0, en);
5389 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5391 struct hclge_vport *vport = hclge_get_vport(handle);
5392 struct hnae3_knic_private_info *kinfo;
5393 struct hnae3_queue *queue;
5394 struct hclge_tqp *tqp;
5397 kinfo = &vport->nic.kinfo;
5398 for (i = 0; i < kinfo->num_tqps; i++) {
5399 queue = handle->kinfo.tqp[i];
5400 tqp = container_of(queue, struct hclge_tqp, q);
5401 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5405 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5407 struct hclge_vport *vport = hclge_get_vport(handle);
5408 struct hclge_dev *hdev = vport->back;
5411 mod_timer(&hdev->service_timer, jiffies + HZ);
5413 del_timer_sync(&hdev->service_timer);
5414 cancel_work_sync(&hdev->service_task);
5415 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5419 static int hclge_ae_start(struct hnae3_handle *handle)
5421 struct hclge_vport *vport = hclge_get_vport(handle);
5422 struct hclge_dev *hdev = vport->back;
5425 hclge_cfg_mac_mode(hdev, true);
5426 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5427 hdev->hw.mac.link = 0;
5429 /* reset tqp stats */
5430 hclge_reset_tqp_stats(handle);
5432 hclge_mac_start_phy(hdev);
5437 static void hclge_ae_stop(struct hnae3_handle *handle)
5439 struct hclge_vport *vport = hclge_get_vport(handle);
5440 struct hclge_dev *hdev = vport->back;
5443 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5445 /* If it is not PF reset, the firmware will disable the MAC,
5446 * so we only need to stop the PHY here.
5447 */
5448 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5449 hdev->reset_type != HNAE3_FUNC_RESET) {
5450 hclge_mac_stop_phy(hdev);
5454 for (i = 0; i < handle->kinfo.num_tqps; i++)
5455 hclge_reset_tqp(handle, i);
5458 hclge_cfg_mac_mode(hdev, false);
5460 hclge_mac_stop_phy(hdev);
5462 /* reset tqp stats */
5463 hclge_reset_tqp_stats(handle);
5464 hclge_update_link_status(hdev);
5467 int hclge_vport_start(struct hclge_vport *vport)
5469 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5470 vport->last_active_jiffies = jiffies;
5474 void hclge_vport_stop(struct hclge_vport *vport)
5476 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5479 static int hclge_client_start(struct hnae3_handle *handle)
5481 struct hclge_vport *vport = hclge_get_vport(handle);
5483 return hclge_vport_start(vport);
5486 static void hclge_client_stop(struct hnae3_handle *handle)
5488 struct hclge_vport *vport = hclge_get_vport(handle);
5490 hclge_vport_stop(vport);
5493 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5494 u16 cmdq_resp, u8 resp_code,
5495 enum hclge_mac_vlan_tbl_opcode op)
5497 struct hclge_dev *hdev = vport->back;
5498 int return_status = -EIO;
5501 dev_err(&hdev->pdev->dev,
5502 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5507 if (op == HCLGE_MAC_VLAN_ADD) {
5508 if ((!resp_code) || (resp_code == 1)) {
5510 } else if (resp_code == 2) {
5511 return_status = -ENOSPC;
5512 dev_err(&hdev->pdev->dev,
5513 "add mac addr failed for uc_overflow.\n");
5514 } else if (resp_code == 3) {
5515 return_status = -ENOSPC;
5516 dev_err(&hdev->pdev->dev,
5517 "add mac addr failed for mc_overflow.\n");
5519 dev_err(&hdev->pdev->dev,
5520 "add mac addr failed for undefined, code=%d.\n",
5523 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5526 } else if (resp_code == 1) {
5527 return_status = -ENOENT;
5528 dev_dbg(&hdev->pdev->dev,
5529 "remove mac addr failed for miss.\n");
5531 dev_err(&hdev->pdev->dev,
5532 "remove mac addr failed for undefined, code=%d.\n",
5535 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5538 } else if (resp_code == 1) {
5539 return_status = -ENOENT;
5540 dev_dbg(&hdev->pdev->dev,
5541 "lookup mac addr failed for miss.\n");
5543 dev_err(&hdev->pdev->dev,
5544 "lookup mac addr failed for undefined, code=%d.\n",
5548 return_status = -EINVAL;
5549 dev_err(&hdev->pdev->dev,
5550 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5554 return return_status;
5557 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5562 if (vfid > 255 || vfid < 0)
5565 if (vfid >= 0 && vfid <= 191) {
5566 word_num = vfid / 32;
5567 bit_num = vfid % 32;
5569 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5571 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5573 word_num = (vfid - 192) / 32;
5574 bit_num = vfid % 32;
5576 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5578 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
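/* Worked example (illustrative vfid): for vfid 200, word_num =
 * (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8, so bit 8 of
 * desc[2].data[0] is touched. desc[1] covers vfids 0-191
 * (6 words * 32 bits); desc[2] covers vfids 192-255.
 */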
5584 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5586 #define HCLGE_DESC_NUMBER 3
5587 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5590 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5591 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5592 if (desc[i].data[j])
5598 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5601 const unsigned char *mac_addr = addr;
5602 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5603 (mac_addr[0]) | (mac_addr[1] << 8);
5604 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5606 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5607 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
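/* Worked example (illustrative address): for 00:11:22:33:44:55 the
 * packing above gives high_val = 0x33221100 and low_val = 0x5544,
 * i.e. the MAC bytes are laid out little-endian in the table entry.
 */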
5610 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5611 struct hclge_mac_vlan_tbl_entry_cmd *req)
5613 struct hclge_dev *hdev = vport->back;
5614 struct hclge_desc desc;
5619 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5621 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5625 dev_err(&hdev->pdev->dev,
5626 "del mac addr failed for cmd_send, ret =%d.\n",
5630 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5631 retval = le16_to_cpu(desc.retval);
5633 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5634 HCLGE_MAC_VLAN_REMOVE);
5637 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5638 struct hclge_mac_vlan_tbl_entry_cmd *req,
5639 struct hclge_desc *desc,
5642 struct hclge_dev *hdev = vport->back;
5647 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5649 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5650 memcpy(desc[0].data,
5652 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5653 hclge_cmd_setup_basic_desc(&desc[1],
5654 HCLGE_OPC_MAC_VLAN_ADD,
5656 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5657 hclge_cmd_setup_basic_desc(&desc[2],
5658 HCLGE_OPC_MAC_VLAN_ADD,
5660 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5662 memcpy(desc[0].data,
5664 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5665 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5668 dev_err(&hdev->pdev->dev,
5669 "lookup mac addr failed for cmd_send, ret =%d.\n",
5673 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5674 retval = le16_to_cpu(desc[0].retval);
5676 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5677 HCLGE_MAC_VLAN_LKUP);
5680 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5681 struct hclge_mac_vlan_tbl_entry_cmd *req,
5682 struct hclge_desc *mc_desc)
5684 struct hclge_dev *hdev = vport->back;
5691 struct hclge_desc desc;
5693 hclge_cmd_setup_basic_desc(&desc,
5694 HCLGE_OPC_MAC_VLAN_ADD,
5696 memcpy(desc.data, req,
5697 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5698 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5699 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5700 retval = le16_to_cpu(desc.retval);
5702 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5704 HCLGE_MAC_VLAN_ADD);
5706 hclge_cmd_reuse_desc(&mc_desc[0], false);
5707 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5708 hclge_cmd_reuse_desc(&mc_desc[1], false);
5709 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5710 hclge_cmd_reuse_desc(&mc_desc[2], false);
5711 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5712 memcpy(mc_desc[0].data, req,
5713 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5714 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5715 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5716 retval = le16_to_cpu(mc_desc[0].retval);
5718 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5720 HCLGE_MAC_VLAN_ADD);
5724 dev_err(&hdev->pdev->dev,
5725 "add mac addr failed for cmd_send, ret =%d.\n",
5733 static int hclge_init_umv_space(struct hclge_dev *hdev)
5735 u16 allocated_size = 0;
5738 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5743 if (allocated_size < hdev->wanted_umv_size)
5744 dev_warn(&hdev->pdev->dev,
5745 "Alloc umv space failed, want %d, get %d\n",
5746 hdev->wanted_umv_size, allocated_size);
5748 mutex_init(&hdev->umv_mutex);
5749 hdev->max_umv_size = allocated_size;
5750 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5751 hdev->share_umv_size = hdev->priv_umv_size +
5752 hdev->max_umv_size % (hdev->num_req_vfs + 2);
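/* Worked example (hypothetical sizes): with allocated_size = 300 and
 * num_req_vfs = 6, the space is split into 6 + 2 = 8 slices; the PF
 * and each VF get a private slice of 300 / 8 = 37 entries, and the
 * remaining slice plus the remainder (37 + 300 % 8 = 41) is shared.
 */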
5757 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5761 if (hdev->max_umv_size > 0) {
5762 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5766 hdev->max_umv_size = 0;
5768 mutex_destroy(&hdev->umv_mutex);
5773 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5774 u16 *allocated_size, bool is_alloc)
5776 struct hclge_umv_spc_alc_cmd *req;
5777 struct hclge_desc desc;
5780 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5781 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5782 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5783 req->space_size = cpu_to_le32(space_size);
5785 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5787 dev_err(&hdev->pdev->dev,
5788 "%s umv space failed for cmd_send, ret =%d\n",
5789 is_alloc ? "allocate" : "free", ret);
5793 if (is_alloc && allocated_size)
5794 *allocated_size = le32_to_cpu(desc.data[1]);
5799 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5801 struct hclge_vport *vport;
5804 for (i = 0; i < hdev->num_alloc_vport; i++) {
5805 vport = &hdev->vport[i];
5806 vport->used_umv_num = 0;
5809 mutex_lock(&hdev->umv_mutex);
5810 hdev->share_umv_size = hdev->priv_umv_size +
5811 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5812 mutex_unlock(&hdev->umv_mutex);
5815 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5817 struct hclge_dev *hdev = vport->back;
5820 mutex_lock(&hdev->umv_mutex);
5821 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5822 hdev->share_umv_size == 0);
5823 mutex_unlock(&hdev->umv_mutex);
5828 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5830 struct hclge_dev *hdev = vport->back;
5832 mutex_lock(&hdev->umv_mutex);
5834 if (vport->used_umv_num > hdev->priv_umv_size)
5835 hdev->share_umv_size++;
5836 vport->used_umv_num--;
5838 if (vport->used_umv_num >= hdev->priv_umv_size)
5839 hdev->share_umv_size--;
5840 vport->used_umv_num++;
5842 mutex_unlock(&hdev->umv_mutex);
5845 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5846 const unsigned char *addr)
5848 struct hclge_vport *vport = hclge_get_vport(handle);
5850 return hclge_add_uc_addr_common(vport, addr);
5853 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5854 const unsigned char *addr)
5856 struct hclge_dev *hdev = vport->back;
5857 struct hclge_mac_vlan_tbl_entry_cmd req;
5858 struct hclge_desc desc;
5859 u16 egress_port = 0;
5862 /* mac addr check */
5863 if (is_zero_ether_addr(addr) ||
5864 is_broadcast_ether_addr(addr) ||
5865 is_multicast_ether_addr(addr)) {
5866 dev_err(&hdev->pdev->dev,
5867 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5869 is_zero_ether_addr(addr),
5870 is_broadcast_ether_addr(addr),
5871 is_multicast_ether_addr(addr));
5875 memset(&req, 0, sizeof(req));
5876 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5878 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5879 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5881 req.egress_port = cpu_to_le16(egress_port);
5883 hclge_prepare_mac_addr(&req, addr);
5885 /* Lookup the mac address in the mac_vlan table, and add
5886 * it if the entry does not exist. Duplicate unicast entries
5887 * are not allowed in the mac vlan table.
5888 */
5889 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5890 if (ret == -ENOENT) {
5891 if (!hclge_is_umv_space_full(vport)) {
5892 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5894 hclge_update_umv_space(vport, false);
5898 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5899 hdev->priv_umv_size);
5904 /* check if we just hit the duplicate */
5908 dev_err(&hdev->pdev->dev,
5909 "PF failed to add unicast entry(%pM) in the MAC table\n",
5915 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5916 const unsigned char *addr)
5918 struct hclge_vport *vport = hclge_get_vport(handle);
5920 return hclge_rm_uc_addr_common(vport, addr);
5923 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5924 const unsigned char *addr)
5926 struct hclge_dev *hdev = vport->back;
5927 struct hclge_mac_vlan_tbl_entry_cmd req;
5930 /* mac addr check */
5931 if (is_zero_ether_addr(addr) ||
5932 is_broadcast_ether_addr(addr) ||
5933 is_multicast_ether_addr(addr)) {
5934 dev_dbg(&hdev->pdev->dev,
5935 "Remove mac err! invalid mac:%pM.\n",
5940 memset(&req, 0, sizeof(req));
5941 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5942 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5943 hclge_prepare_mac_addr(&req, addr);
5944 ret = hclge_remove_mac_vlan_tbl(vport, &req);
5946 hclge_update_umv_space(vport, true);
5951 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5952 const unsigned char *addr)
5954 struct hclge_vport *vport = hclge_get_vport(handle);
5956 return hclge_add_mc_addr_common(vport, addr);
5959 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5960 const unsigned char *addr)
5962 struct hclge_dev *hdev = vport->back;
5963 struct hclge_mac_vlan_tbl_entry_cmd req;
5964 struct hclge_desc desc[3];
5967 /* mac addr check */
5968 if (!is_multicast_ether_addr(addr)) {
5969 dev_err(&hdev->pdev->dev,
5970 "Add mc mac err! invalid mac:%pM.\n",
5974 memset(&req, 0, sizeof(req));
5975 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5976 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5977 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5978 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5979 hclge_prepare_mac_addr(&req, addr);
5980 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5982 /* This mac addr exists, update the VFID for it */
5983 hclge_update_desc_vfid(desc, vport->vport_id, false);
5984 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5986 /* This mac addr does not exist, add a new entry for it */
5987 memset(desc[0].data, 0, sizeof(desc[0].data));
5988 memset(desc[1].data, 0, sizeof(desc[1].data));
5989 memset(desc[2].data, 0, sizeof(desc[2].data));
5990 hclge_update_desc_vfid(desc, vport->vport_id, false);
5991 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5994 if (status == -ENOSPC)
5995 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6000 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6001 const unsigned char *addr)
6003 struct hclge_vport *vport = hclge_get_vport(handle);
6005 return hclge_rm_mc_addr_common(vport, addr);
6008 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6009 const unsigned char *addr)
6011 struct hclge_dev *hdev = vport->back;
6012 struct hclge_mac_vlan_tbl_entry_cmd req;
6013 enum hclge_cmd_status status;
6014 struct hclge_desc desc[3];
6016 /* mac addr check */
6017 if (!is_multicast_ether_addr(addr)) {
6018 dev_dbg(&hdev->pdev->dev,
6019 "Remove mc mac err! invalid mac:%pM.\n",
6024 memset(&req, 0, sizeof(req));
6025 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6026 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6027 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6028 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6029 hclge_prepare_mac_addr(&req, addr);
6030 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6032 /* This mac addr exists, remove this handle's VFID for it */
6033 hclge_update_desc_vfid(desc, vport->vport_id, true);
6035 if (hclge_is_all_function_id_zero(desc))
6036 /* All the vfids are zero, so delete this entry */
6037 status = hclge_remove_mac_vlan_tbl(vport, &req);
6039 /* Not all the vfids are zero, update the vfid */
6040 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6043 /* Maybe this mac address is in mta table, but it cannot be
6044 * deleted here because an entry of mta represents an address
6045 * range rather than a specific address. The delete action to
6046 * all entries will take effect in update_mta_status called by
6047 * hns3_nic_set_rx_mode.
6055 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6056 u16 cmdq_resp, u8 resp_code)
6058 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6059 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6060 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6061 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6066 dev_err(&hdev->pdev->dev,
6067 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6072 switch (resp_code) {
6073 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6074 case HCLGE_ETHERTYPE_ALREADY_ADD:
6077 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6078 dev_err(&hdev->pdev->dev,
6079 "add mac ethertype failed for manager table overflow.\n");
6080 return_status = -EIO;
6082 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6083 dev_err(&hdev->pdev->dev,
6084 "add mac ethertype failed for key conflict.\n");
6085 return_status = -EIO;
6088 dev_err(&hdev->pdev->dev,
6089 "add mac ethertype failed for undefined, code=%d.\n",
6091 return_status = -EIO;
6094 return return_status;
6097 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6098 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6100 struct hclge_desc desc;
6105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6106 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6108 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6110 dev_err(&hdev->pdev->dev,
6111 "add mac ethertype failed for cmd_send, ret =%d.\n",
6116 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6117 retval = le16_to_cpu(desc.retval);
6119 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6122 static int init_mgr_tbl(struct hclge_dev *hdev)
6127 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6128 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6130 dev_err(&hdev->pdev->dev,
6131 "add mac ethertype failed, ret =%d.\n",
6140 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6142 struct hclge_vport *vport = hclge_get_vport(handle);
6143 struct hclge_dev *hdev = vport->back;
6145 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6148 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6151 const unsigned char *new_addr = (const unsigned char *)p;
6152 struct hclge_vport *vport = hclge_get_vport(handle);
6153 struct hclge_dev *hdev = vport->back;
6156 /* mac addr check */
6157 if (is_zero_ether_addr(new_addr) ||
6158 is_broadcast_ether_addr(new_addr) ||
6159 is_multicast_ether_addr(new_addr)) {
6160 dev_err(&hdev->pdev->dev,
6161 "Change uc mac err! invalid mac:%p.\n",
6166 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6167 dev_warn(&hdev->pdev->dev,
6168 "remove old uc mac address fail.\n");
6170 ret = hclge_add_uc_addr(handle, new_addr);
6172 dev_err(&hdev->pdev->dev,
6173 "add uc mac address fail, ret =%d.\n",
6177 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6178 dev_err(&hdev->pdev->dev,
6179 "restore uc mac address fail.\n");
6184 ret = hclge_pause_addr_cfg(hdev, new_addr);
6186 dev_err(&hdev->pdev->dev,
6187 "configure mac pause address fail, ret =%d.\n",
6192 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6197 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6200 struct hclge_vport *vport = hclge_get_vport(handle);
6201 struct hclge_dev *hdev = vport->back;
6203 if (!hdev->hw.mac.phydev)
6206 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6209 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6210 u8 fe_type, bool filter_en)
6212 struct hclge_vlan_filter_ctrl_cmd *req;
6213 struct hclge_desc desc;
6216 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6218 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6219 req->vlan_type = vlan_type;
6220 req->vlan_fe = filter_en ? fe_type : 0;
6222 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6224 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6230 #define HCLGE_FILTER_TYPE_VF 0
6231 #define HCLGE_FILTER_TYPE_PORT 1
6232 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6233 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6234 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6235 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6236 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6237 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6238 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6239 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6240 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6242 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6244 struct hclge_vport *vport = hclge_get_vport(handle);
6245 struct hclge_dev *hdev = vport->back;
6247 if (hdev->pdev->revision >= 0x21) {
6248 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6249 HCLGE_FILTER_FE_EGRESS, enable);
6250 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6251 HCLGE_FILTER_FE_INGRESS, enable);
6253 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6254 HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6257 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6259 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6262 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6263 bool is_kill, u16 vlan, u8 qos,
6266 #define HCLGE_MAX_VF_BYTES 16
6267 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6268 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6269 struct hclge_desc desc[2];
6274 hclge_cmd_setup_basic_desc(&desc[0],
6275 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6276 hclge_cmd_setup_basic_desc(&desc[1],
6277 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6279 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6281 vf_byte_off = vfid / 8;
6282 vf_byte_val = 1 << (vfid % 8);
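/* Worked example (illustrative vfid): vfid 10 maps to vf_byte_off =
 * 10 / 8 = 1 and vf_byte_val = 1 << (10 % 8) = 0x04, i.e. bit 2 of
 * the second byte of the VF bitmap.
 */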
6284 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6285 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6287 req0->vlan_id = cpu_to_le16(vlan);
6288 req0->vlan_cfg = is_kill;
6290 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6291 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6293 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6295 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6297 dev_err(&hdev->pdev->dev,
6298 "Send vf vlan command fail, ret =%d.\n",
6304 #define HCLGE_VF_VLAN_NO_ENTRY 2
6305 if (!req0->resp_code || req0->resp_code == 1)
6308 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6309 dev_warn(&hdev->pdev->dev,
6310 "vf vlan table is full, vf vlan filter is disabled\n");
6314 dev_err(&hdev->pdev->dev,
6315 "Add vf vlan filter fail, ret =%d.\n",
6318 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6319 if (!req0->resp_code)
6322 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6323 dev_warn(&hdev->pdev->dev,
6324 "vlan %d filter is not in vf vlan table\n",
6329 dev_err(&hdev->pdev->dev,
6330 "Kill vf vlan filter fail, ret =%d.\n",
6337 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6338 u16 vlan_id, bool is_kill)
6340 struct hclge_vlan_filter_pf_cfg_cmd *req;
6341 struct hclge_desc desc;
6342 u8 vlan_offset_byte_val;
6343 u8 vlan_offset_byte;
6347 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6349 vlan_offset_160 = vlan_id / 160;
6350 vlan_offset_byte = (vlan_id % 160) / 8;
6351 vlan_offset_byte_val = 1 << (vlan_id % 8);
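/* Worked example (illustrative vlan_id): vlan_id 1000 gives
 * vlan_offset_160 = 1000 / 160 = 6, vlan_offset_byte =
 * (1000 % 160) / 8 = 5 and vlan_offset_byte_val = 1 << (1000 % 8) =
 * 0x01, i.e. bit 0 of byte 5 within the seventh 160-vlan window.
 */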
6353 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6354 req->vlan_offset = vlan_offset_160;
6355 req->vlan_cfg = is_kill;
6356 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6358 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6360 dev_err(&hdev->pdev->dev,
6361 "port vlan command, send fail, ret =%d.\n", ret);
6365 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6366 u16 vport_id, u16 vlan_id, u8 qos,
6369 u16 vport_idx, vport_num = 0;
6372 if (is_kill && !vlan_id)
6375 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6378 dev_err(&hdev->pdev->dev,
6379 "Set %d vport vlan filter config fail, ret =%d.\n",
6384 /* vlan 0 may be added twice when 8021q module is enabled */
6385 if (!is_kill && !vlan_id &&
6386 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6389 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6390 dev_err(&hdev->pdev->dev,
6391 "Add port vlan failed, vport %d is already in vlan %d\n",
6397 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6398 dev_err(&hdev->pdev->dev,
6399 "Delete port vlan failed, vport %d is not in vlan %d\n",
6404 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6407 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6408 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6414 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6415 u16 vlan_id, bool is_kill)
6417 struct hclge_vport *vport = hclge_get_vport(handle);
6418 struct hclge_dev *hdev = vport->back;
6420 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6424 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6425 u16 vlan, u8 qos, __be16 proto)
6427 struct hclge_vport *vport = hclge_get_vport(handle);
6428 struct hclge_dev *hdev = vport->back;
6430 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6432 if (proto != htons(ETH_P_8021Q))
6433 return -EPROTONOSUPPORT;
6435 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6438 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6440 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6441 struct hclge_vport_vtag_tx_cfg_cmd *req;
6442 struct hclge_dev *hdev = vport->back;
6443 struct hclge_desc desc;
6446 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6448 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6449 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6450 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6451 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6452 vcfg->accept_tag1 ? 1 : 0);
6453 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6454 vcfg->accept_untag1 ? 1 : 0);
6455 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6456 vcfg->accept_tag2 ? 1 : 0);
6457 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6458 vcfg->accept_untag2 ? 1 : 0);
6459 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6460 vcfg->insert_tag1_en ? 1 : 0);
6461 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6462 vcfg->insert_tag2_en ? 1 : 0);
6463 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6465 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6466 req->vf_bitmap[req->vf_offset] =
6467 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6469 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6471 dev_err(&hdev->pdev->dev,
6472 "Send port txvlan cfg command fail, ret =%d\n",
6478 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6480 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6481 struct hclge_vport_vtag_rx_cfg_cmd *req;
6482 struct hclge_dev *hdev = vport->back;
6483 struct hclge_desc desc;
6486 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6488 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6489 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6490 vcfg->strip_tag1_en ? 1 : 0);
6491 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6492 vcfg->strip_tag2_en ? 1 : 0);
6493 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6494 vcfg->vlan1_vlan_prionly ? 1 : 0);
6495 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6496 vcfg->vlan2_vlan_prionly ? 1 : 0);
6498 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6499 req->vf_bitmap[req->vf_offset] =
6500 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6502 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6504 dev_err(&hdev->pdev->dev,
6505 "Send port rxvlan cfg command fail, ret =%d\n",
6511 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6513 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6514 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6515 struct hclge_desc desc;
6518 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6519 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6520 rx_req->ot_fst_vlan_type =
6521 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6522 rx_req->ot_sec_vlan_type =
6523 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6524 rx_req->in_fst_vlan_type =
6525 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6526 rx_req->in_sec_vlan_type =
6527 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6529 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6531 dev_err(&hdev->pdev->dev,
6532 "Send rxvlan protocol type command fail, ret =%d\n",
6537 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6539 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6540 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6541 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6543 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6545 dev_err(&hdev->pdev->dev,
6546 "Send txvlan protocol type command fail, ret =%d\n",
6552 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6554 #define HCLGE_DEF_VLAN_TYPE 0x8100
6556 struct hnae3_handle *handle = &hdev->vport[0].nic;
6557 struct hclge_vport *vport;
6561 if (hdev->pdev->revision >= 0x21) {
6562 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6563 HCLGE_FILTER_FE_EGRESS, true);
6567 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6568 HCLGE_FILTER_FE_INGRESS, true);
6572 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6573 HCLGE_FILTER_FE_EGRESS_V1_B,
6579 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6581 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6582 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6583 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6584 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6585 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6586 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6588 ret = hclge_set_vlan_protocol_type(hdev);
6592 for (i = 0; i < hdev->num_alloc_vport; i++) {
6593 vport = &hdev->vport[i];
6594 vport->txvlan_cfg.accept_tag1 = true;
6595 vport->txvlan_cfg.accept_untag1 = true;
6597 /* accept_tag2 and accept_untag2 are not supported on
6598 * pdev revision(0x20); newer revisions support them. The
6599 * firmware in revision(0x20) will not return an error when the
6600 * driver sets these two fields in a command.
6601 * These two fields cannot be configured by the user.
6602 */
6603 vport->txvlan_cfg.accept_tag2 = true;
6604 vport->txvlan_cfg.accept_untag2 = true;
6606 vport->txvlan_cfg.insert_tag1_en = false;
6607 vport->txvlan_cfg.insert_tag2_en = false;
6608 vport->txvlan_cfg.default_tag1 = 0;
6609 vport->txvlan_cfg.default_tag2 = 0;
6611 ret = hclge_set_vlan_tx_offload_cfg(vport);
6615 vport->rxvlan_cfg.strip_tag1_en = false;
6616 vport->rxvlan_cfg.strip_tag2_en = true;
6617 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6618 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6620 ret = hclge_set_vlan_rx_offload_cfg(vport);
6625 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6628 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6630 struct hclge_vport *vport = hclge_get_vport(handle);
6632 vport->rxvlan_cfg.strip_tag1_en = false;
6633 vport->rxvlan_cfg.strip_tag2_en = enable;
6634 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6635 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6637 return hclge_set_vlan_rx_offload_cfg(vport);
6640 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6642 struct hclge_config_max_frm_size_cmd *req;
6643 struct hclge_desc desc;
6645 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6647 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6648 req->max_frm_size = cpu_to_le16(new_mps);
6649 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6651 return hclge_cmd_send(&hdev->hw, &desc, 1);
6654 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6656 struct hclge_vport *vport = hclge_get_vport(handle);
6658 return hclge_set_vport_mtu(vport, new_mtu);
6661 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6663 struct hclge_dev *hdev = vport->back;
6664 int i, max_frm_size, ret = 0;
6666 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
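/* Worked example: for new_mtu = 1500, max_frm_size = 1500 +
 * ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4) = 1526
 * bytes, leaving room for a double-tagged frame.
 */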
6667 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6668 max_frm_size > HCLGE_MAC_MAX_FRAME)
6671 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6672 mutex_lock(&hdev->vport_lock);
6673 /* VF's mps must fit within hdev->mps */
6674 if (vport->vport_id && max_frm_size > hdev->mps) {
6675 mutex_unlock(&hdev->vport_lock);
6677 } else if (vport->vport_id) {
6678 vport->mps = max_frm_size;
6679 mutex_unlock(&hdev->vport_lock);
6683 /* PF's mps must be greater than VF's mps */
6684 for (i = 1; i < hdev->num_alloc_vport; i++)
6685 if (max_frm_size < hdev->vport[i].mps) {
6686 mutex_unlock(&hdev->vport_lock);
6690 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6692 ret = hclge_set_mac_mtu(hdev, max_frm_size);
6694 dev_err(&hdev->pdev->dev,
6695 "Change mtu fail, ret =%d\n", ret);
6699 hdev->mps = max_frm_size;
6700 vport->mps = max_frm_size;
6702 ret = hclge_buffer_alloc(hdev);
6704 dev_err(&hdev->pdev->dev,
6705 "Allocate buffer fail, ret =%d\n", ret);
6708 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6709 mutex_unlock(&hdev->vport_lock);
6713 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6716 struct hclge_reset_tqp_queue_cmd *req;
6717 struct hclge_desc desc;
6720 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6722 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6723 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6724 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6726 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6728 dev_err(&hdev->pdev->dev,
6729 "Send tqp reset cmd error, status =%d\n", ret);
6736 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6738 struct hclge_reset_tqp_queue_cmd *req;
6739 struct hclge_desc desc;
6742 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6744 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6745 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6747 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6749 dev_err(&hdev->pdev->dev,
6750 "Get reset status error, status =%d\n", ret);
6754 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}

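/* Resolve the pause configuration from autonegotiation the way the MII
 * core expects: translate what we advertise and what the link partner
 * advertises into LPA_PAUSE_CAP/LPA_PAUSE_ASYM terms and let
 * mii_resolve_flowctrl_fdx() pick the TX/RX pause result. Pause is
 * only defined for full duplex, so half duplex forces both off.
 */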
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

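/* Client instances bind the hardware abstraction to its users: the
 * KNIC (kernel NIC) and UNIC clients drive vport->nic, while the RoCE
 * client drives vport->roce and is only initialised once both a RoCE
 * and a NIC client are registered, since the RoCE base info depends on
 * the NIC setup (see hclge_init_roce_base_info()).
 */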
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}
			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);
			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

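/* PCI bring-up: enable the function, prefer a 64-bit DMA mask with a
 * 32-bit fallback, claim the regions and map BAR2, the device's
 * configuration register space.
 */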
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

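/* FLR (function level reset) hooks called by the PCI core around the
 * actual reset: prepare requests an FLR-type reset through the common
 * reset path and then polls for the DOWN flag for up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds); flr_done tells
 * the reset task that the FLR itself has completed.
 */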
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS 100
#define HCLGE_FLR_WAIT_CNT 50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

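/* Main PF initialisation. On failure each step unwinds through the
 * goto ladder at the bottom, so the error labels must stay in sync
 * with the order of the init sequence.
 */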
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_start(vport);
		vport++;
	}
}

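/* Re-initialise the state that a hardware reset clobbers. Unlike
 * hclge_init_ae_dev(), nothing is allocated here; the existing tables
 * and hardware blocks are simply reprogrammed.
 */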
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n",
			ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

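/* Changing the channel count re-sizes RSS: each TC's RSS table size is
 * programmed as a power-of-two exponent (the roundup_pow_of_two() and
 * ilog2() pair below) and the indirection table is refilled
 * round-robin (entry i maps to queue i % rss_size), unless the user
 * configured the table explicitly (rxfh_configured).
 */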
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

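/* Register values come back packed in command descriptors. A
 * descriptor is eight 32-bit words, and the first one spends two of
 * them (one 64-bit word) on the command header. That is why the copy
 * loops below take DATANUM - 2 (32-bit) or DATANUM - 1 (64-bit)
 * values from desc[0] but a full descriptor's worth from the rest,
 * and why cmd_num rounds up from regs_num + 2 (or + 1).
 */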
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM 4
#define SEPARATOR_VALUE 0xFFFFFFFF
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))

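/* Layout of the ethtool register dump: the directly readable lists
 * (cmdq, common, per-ring and per-vector interrupt registers), each
 * padded with SEPARATOR_VALUE up to a REG_NUM_PER_LINE boundary,
 * followed by the 32-bit and then 64-bit register blocks returned by
 * the firmware. get_regs_len() and get_regs() must agree on this
 * layout.
 */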
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

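/* PF side of the hnae3 hardware-compatibility layer; the hns3 client
 * drives the hardware exclusively through these callbacks.
 */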
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);