1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
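/* Helpers for the MAC statistics table below: HCLGE_STATS_READ() reads a
 * 64-bit counter at a byte offset inside a stats struct, and
 * HCLGE_MAC_STATS_FIELD_OFF() produces that offset for struct hclge_mac_stats.
 */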
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
36                                u16 *allocated_size, bool is_alloc);
37
38 static struct hnae3_ae_algo ae_algo;
39
40 static const struct pci_device_id ae_algo_pci_tbl[] = {
41         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
48         /* required last entry */
49         {0, }
50 };
51
52 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
53
54 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
55                                          HCLGE_CMDQ_TX_ADDR_H_REG,
56                                          HCLGE_CMDQ_TX_DEPTH_REG,
57                                          HCLGE_CMDQ_TX_TAIL_REG,
58                                          HCLGE_CMDQ_TX_HEAD_REG,
59                                          HCLGE_CMDQ_RX_ADDR_L_REG,
60                                          HCLGE_CMDQ_RX_ADDR_H_REG,
61                                          HCLGE_CMDQ_RX_DEPTH_REG,
62                                          HCLGE_CMDQ_RX_TAIL_REG,
63                                          HCLGE_CMDQ_RX_HEAD_REG,
64                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
65                                          HCLGE_CMDQ_INTR_STS_REG,
66                                          HCLGE_CMDQ_INTR_EN_REG,
67                                          HCLGE_CMDQ_INTR_GEN_REG};
68
69 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
70                                            HCLGE_VECTOR0_OTER_EN_REG,
71                                            HCLGE_MISC_RESET_STS_REG,
72                                            HCLGE_MISC_VECTOR_INT_STS,
73                                            HCLGE_GLOBAL_RESET_REG,
74                                            HCLGE_FUN_RST_ING,
75                                            HCLGE_GRO_EN_REG};
76
77 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
78                                          HCLGE_RING_RX_ADDR_H_REG,
79                                          HCLGE_RING_RX_BD_NUM_REG,
80                                          HCLGE_RING_RX_BD_LENGTH_REG,
81                                          HCLGE_RING_RX_MERGE_EN_REG,
82                                          HCLGE_RING_RX_TAIL_REG,
83                                          HCLGE_RING_RX_HEAD_REG,
84                                          HCLGE_RING_RX_FBD_NUM_REG,
85                                          HCLGE_RING_RX_OFFSET_REG,
86                                          HCLGE_RING_RX_FBD_OFFSET_REG,
87                                          HCLGE_RING_RX_STASH_REG,
88                                          HCLGE_RING_RX_BD_ERR_REG,
89                                          HCLGE_RING_TX_ADDR_L_REG,
90                                          HCLGE_RING_TX_ADDR_H_REG,
91                                          HCLGE_RING_TX_BD_NUM_REG,
92                                          HCLGE_RING_TX_PRIORITY_REG,
93                                          HCLGE_RING_TX_TC_REG,
94                                          HCLGE_RING_TX_MERGE_EN_REG,
95                                          HCLGE_RING_TX_TAIL_REG,
96                                          HCLGE_RING_TX_HEAD_REG,
97                                          HCLGE_RING_TX_FBD_NUM_REG,
98                                          HCLGE_RING_TX_OFFSET_REG,
99                                          HCLGE_RING_TX_EBD_NUM_REG,
100                                          HCLGE_RING_TX_EBD_OFFSET_REG,
101                                          HCLGE_RING_TX_BD_ERR_REG,
102                                          HCLGE_RING_EN_REG};
103
104 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
105                                              HCLGE_TQP_INTR_GL0_REG,
106                                              HCLGE_TQP_INTR_GL1_REG,
107                                              HCLGE_TQP_INTR_GL2_REG,
108                                              HCLGE_TQP_INTR_RL_REG};
109
110 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
111         "App    Loopback test",
112         "Serdes serial Loopback test",
113         "Serdes parallel Loopback test",
114         "Phy    Loopback test"
115 };
116
117 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
118         {"mac_tx_mac_pause_num",
119                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
120         {"mac_rx_mac_pause_num",
121                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
122         {"mac_tx_control_pkt_num",
123                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
124         {"mac_rx_control_pkt_num",
125                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
126         {"mac_tx_pfc_pkt_num",
127                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
128         {"mac_tx_pfc_pri0_pkt_num",
129                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
130         {"mac_tx_pfc_pri1_pkt_num",
131                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
132         {"mac_tx_pfc_pri2_pkt_num",
133                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
134         {"mac_tx_pfc_pri3_pkt_num",
135                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
136         {"mac_tx_pfc_pri4_pkt_num",
137                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
138         {"mac_tx_pfc_pri5_pkt_num",
139                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
140         {"mac_tx_pfc_pri6_pkt_num",
141                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
142         {"mac_tx_pfc_pri7_pkt_num",
143                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
144         {"mac_rx_pfc_pkt_num",
145                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
146         {"mac_rx_pfc_pri0_pkt_num",
147                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
148         {"mac_rx_pfc_pri1_pkt_num",
149                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
150         {"mac_rx_pfc_pri2_pkt_num",
151                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
152         {"mac_rx_pfc_pri3_pkt_num",
153                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
154         {"mac_rx_pfc_pri4_pkt_num",
155                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
156         {"mac_rx_pfc_pri5_pkt_num",
157                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
158         {"mac_rx_pfc_pri6_pkt_num",
159                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
160         {"mac_rx_pfc_pri7_pkt_num",
161                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
162         {"mac_tx_total_pkt_num",
163                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
164         {"mac_tx_total_oct_num",
165                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
166         {"mac_tx_good_pkt_num",
167                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
168         {"mac_tx_bad_pkt_num",
169                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
170         {"mac_tx_good_oct_num",
171                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
172         {"mac_tx_bad_oct_num",
173                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
174         {"mac_tx_uni_pkt_num",
175                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
176         {"mac_tx_multi_pkt_num",
177                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
178         {"mac_tx_broad_pkt_num",
179                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
180         {"mac_tx_undersize_pkt_num",
181                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
182         {"mac_tx_oversize_pkt_num",
183                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
184         {"mac_tx_64_oct_pkt_num",
185                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
186         {"mac_tx_65_127_oct_pkt_num",
187                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
188         {"mac_tx_128_255_oct_pkt_num",
189                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
190         {"mac_tx_256_511_oct_pkt_num",
191                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
192         {"mac_tx_512_1023_oct_pkt_num",
193                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
194         {"mac_tx_1024_1518_oct_pkt_num",
195                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
196         {"mac_tx_1519_2047_oct_pkt_num",
197                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
198         {"mac_tx_2048_4095_oct_pkt_num",
199                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
200         {"mac_tx_4096_8191_oct_pkt_num",
201                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
202         {"mac_tx_8192_9216_oct_pkt_num",
203                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
204         {"mac_tx_9217_12287_oct_pkt_num",
205                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
206         {"mac_tx_12288_16383_oct_pkt_num",
207                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
208         {"mac_tx_1519_max_good_pkt_num",
209                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
210         {"mac_tx_1519_max_bad_pkt_num",
211                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
212         {"mac_rx_total_pkt_num",
213                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
214         {"mac_rx_total_oct_num",
215                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
216         {"mac_rx_good_pkt_num",
217                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
218         {"mac_rx_bad_pkt_num",
219                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
220         {"mac_rx_good_oct_num",
221                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
222         {"mac_rx_bad_oct_num",
223                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
224         {"mac_rx_uni_pkt_num",
225                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
226         {"mac_rx_multi_pkt_num",
227                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
228         {"mac_rx_broad_pkt_num",
229                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
230         {"mac_rx_undersize_pkt_num",
231                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
232         {"mac_rx_oversize_pkt_num",
233                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
234         {"mac_rx_64_oct_pkt_num",
235                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
236         {"mac_rx_65_127_oct_pkt_num",
237                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
238         {"mac_rx_128_255_oct_pkt_num",
239                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
240         {"mac_rx_256_511_oct_pkt_num",
241                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
242         {"mac_rx_512_1023_oct_pkt_num",
243                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
244         {"mac_rx_1024_1518_oct_pkt_num",
245                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
246         {"mac_rx_1519_2047_oct_pkt_num",
247                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
248         {"mac_rx_2048_4095_oct_pkt_num",
249                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
250         {"mac_rx_4096_8191_oct_pkt_num",
251                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
252         {"mac_rx_8192_9216_oct_pkt_num",
253                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
254         {"mac_rx_9217_12287_oct_pkt_num",
255                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
256         {"mac_rx_12288_16383_oct_pkt_num",
257                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
258         {"mac_rx_1519_max_good_pkt_num",
259                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
260         {"mac_rx_1519_max_bad_pkt_num",
261                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
262
263         {"mac_tx_fragment_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
265         {"mac_tx_undermin_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
267         {"mac_tx_jabber_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
269         {"mac_tx_err_all_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
271         {"mac_tx_from_app_good_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
273         {"mac_tx_from_app_bad_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
275         {"mac_rx_fragment_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
277         {"mac_rx_undermin_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
279         {"mac_rx_jabber_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
281         {"mac_rx_fcs_err_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
283         {"mac_rx_send_app_good_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
285         {"mac_rx_send_app_bad_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
287 };
288
289 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
290         {
291                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
292                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
293                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
294                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
295                 .i_port_bitmap = 0x1,
296         },
297 };
298
299 static const u8 hclge_hash_key[] = {
300         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
301         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
302         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
303         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
304         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
305 };
306
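/* Legacy MAC statistics read: issue a fixed chain of HCLGE_MAC_CMD_NUM
 * descriptors and accumulate every returned 64-bit counter into
 * hdev->hw_stats.mac_stats, in the field order of struct hclge_mac_stats.
 */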
307 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
308 {
309 #define HCLGE_MAC_CMD_NUM 21
310
311         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
312         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
313         __le64 *desc_data;
314         int i, k, n;
315         int ret;
316
317         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
318         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
319         if (ret) {
320                 dev_err(&hdev->pdev->dev,
321                         "Get MAC pkt stats fail, status = %d.\n", ret);
322
323                 return ret;
324         }
325
326         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
327                 /* for special opcode 0032, only the first desc has the head */
328                 if (unlikely(i == 0)) {
329                         desc_data = (__le64 *)(&desc[i].data[0]);
330                         n = HCLGE_RD_FIRST_STATS_NUM;
331                 } else {
332                         desc_data = (__le64 *)(&desc[i]);
333                         n = HCLGE_RD_OTHER_STATS_NUM;
334                 }
335
336                 for (k = 0; k < n; k++) {
337                         *data += le64_to_cpu(*desc_data);
338                         data++;
339                         desc_data++;
340                 }
341         }
342
343         return 0;
344 }
345
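/* Newer firmware path: the number of descriptors is reported by the firmware
 * (see hclge_mac_query_reg_num()), so allocate them dynamically and
 * accumulate the counters the same way as the legacy path.
 */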
346 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
347 {
348         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
349         struct hclge_desc *desc;
350         __le64 *desc_data;
351         u16 i, k, n;
352         int ret;
353
354         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
355         if (!desc)
356                 return -ENOMEM;
357         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
358         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
359         if (ret) {
360                 kfree(desc);
361                 return ret;
362         }
363
364         for (i = 0; i < desc_num; i++) {
365                 /* for special opcode 0034, only the first desc has the head */
366                 if (i == 0) {
367                         desc_data = (__le64 *)(&desc[i].data[0]);
368                         n = HCLGE_RD_FIRST_STATS_NUM;
369                 } else {
370                         desc_data = (__le64 *)(&desc[i]);
371                         n = HCLGE_RD_OTHER_STATS_NUM;
372                 }
373
374                 for (k = 0; k < n; k++) {
375                         *data += le64_to_cpu(*desc_data);
376                         data++;
377                         desc_data++;
378                 }
379         }
380
381         kfree(desc);
382
383         return 0;
384 }
385
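/* Ask the firmware how many MAC statistics registers exist and convert that
 * count into the number of query descriptors needed: one descriptor carrying
 * the command head, plus enough descriptors of four counters each for the
 * rest (rounding up).
 */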
386 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
387 {
388         struct hclge_desc desc;
389         __le32 *desc_data;
390         u32 reg_num;
391         int ret;
392
393         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
394         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
395         if (ret)
396                 return ret;
397
398         desc_data = (__le32 *)(&desc.data[0]);
399         reg_num = le32_to_cpu(*desc_data);
400
401         *desc_num = 1 + ((reg_num - 3) >> 2) +
402                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
403
404         return 0;
405 }
406
407 static int hclge_mac_update_stats(struct hclge_dev *hdev)
408 {
409         u32 desc_num;
410         int ret;
411
412         ret = hclge_mac_query_reg_num(hdev, &desc_num);
413
414         /* The firmware supports the new statistics acquisition method */
415         if (!ret)
416                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
417         else if (ret == -EOPNOTSUPP)
418                 ret = hclge_mac_update_stats_defective(hdev);
419         else
420                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
421
422         return ret;
423 }
424
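/* Query the per-queue RX and TX packet counters one TQP at a time and
 * accumulate them into each queue's tqp_stats.
 */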
425 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
426 {
427         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
428         struct hclge_vport *vport = hclge_get_vport(handle);
429         struct hclge_dev *hdev = vport->back;
430         struct hnae3_queue *queue;
431         struct hclge_desc desc[1];
432         struct hclge_tqp *tqp;
433         int ret, i;
434
435         for (i = 0; i < kinfo->num_tqps; i++) {
436                 queue = handle->kinfo.tqp[i];
437                 tqp = container_of(queue, struct hclge_tqp, q);
438                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
439                 hclge_cmd_setup_basic_desc(&desc[0],
440                                            HCLGE_OPC_QUERY_RX_STATUS,
441                                            true);
442
443                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
444                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
445                 if (ret) {
446                         dev_err(&hdev->pdev->dev,
447                                 "Query tqp stat fail, status = %d, queue = %d\n",
448                                 ret, i);
449                         return ret;
450                 }
451                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
452                         le32_to_cpu(desc[0].data[1]);
453         }
454
455         for (i = 0; i < kinfo->num_tqps; i++) {
456                 queue = handle->kinfo.tqp[i];
457                 tqp = container_of(queue, struct hclge_tqp, q);
458                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
459                 hclge_cmd_setup_basic_desc(&desc[0],
460                                            HCLGE_OPC_QUERY_TX_STATUS,
461                                            true);
462
463                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
464                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
465                 if (ret) {
466                         dev_err(&hdev->pdev->dev,
467                                 "Query tqp stat fail, status = %d, queue = %d\n",
468                                 ret, i);
469                         return ret;
470                 }
471                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
472                         le32_to_cpu(desc[0].data[1]);
473         }
474
475         return 0;
476 }
477
478 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
479 {
480         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
481         struct hclge_tqp *tqp;
482         u64 *buff = data;
483         int i;
484
485         for (i = 0; i < kinfo->num_tqps; i++) {
486                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
487                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
488         }
489
490         for (i = 0; i < kinfo->num_tqps; i++) {
491                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
492                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
493         }
494
495         return buff;
496 }
497
498 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
499 {
500         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
501
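        /* one TX and one RX packet counter per TQP */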
502         return kinfo->num_tqps * 2;
503 }
504
505 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
506 {
507         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
508         u8 *buff = data;
509         int i = 0;
510
511         for (i = 0; i < kinfo->num_tqps; i++) {
512                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
513                         struct hclge_tqp, q);
514                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
515                          tqp->index);
516                 buff = buff + ETH_GSTRING_LEN;
517         }
518
519         for (i = 0; i < kinfo->num_tqps; i++) {
520                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
521                         struct hclge_tqp, q);
522                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
523                          tqp->index);
524                 buff = buff + ETH_GSTRING_LEN;
525         }
526
527         return buff;
528 }
529
530 static u64 *hclge_comm_get_stats(void *comm_stats,
531                                  const struct hclge_comm_stats_str strs[],
532                                  int size, u64 *data)
533 {
534         u64 *buf = data;
535         u32 i;
536
537         for (i = 0; i < size; i++)
538                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
539
540         return buf + size;
541 }
542
543 static u8 *hclge_comm_get_strings(u32 stringset,
544                                   const struct hclge_comm_stats_str strs[],
545                                   int size, u8 *data)
546 {
547         char *buff = (char *)data;
548         u32 i;
549
550         if (stringset != ETH_SS_STATS)
551                 return buff;
552
553         for (i = 0; i < size; i++) {
554                 snprintf(buff, ETH_GSTRING_LEN, "%s",
555                          strs[i].desc);
556                 buff = buff + ETH_GSTRING_LEN;
557         }
558
559         return (u8 *)buff;
560 }
561
562 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
563 {
564         struct hnae3_handle *handle;
565         int status;
566
567         handle = &hdev->vport[0].nic;
568         if (handle->client) {
569                 status = hclge_tqps_update_stats(handle);
570                 if (status) {
571                         dev_err(&hdev->pdev->dev,
572                                 "Update TQPS stats fail, status = %d.\n",
573                                 status);
574                 }
575         }
576
577         status = hclge_mac_update_stats(hdev);
578         if (status)
579                 dev_err(&hdev->pdev->dev,
580                         "Update MAC stats fail, status = %d.\n", status);
581 }
582
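/* Statistics refresh entry point. HCLGE_STATE_STATISTICS_UPDATING serializes
 * updates: if one is already in flight, return without querying the firmware.
 */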
583 static void hclge_update_stats(struct hnae3_handle *handle,
584                                struct net_device_stats *net_stats)
585 {
586         struct hclge_vport *vport = hclge_get_vport(handle);
587         struct hclge_dev *hdev = vport->back;
588         int status;
589
590         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
591                 return;
592
593         status = hclge_mac_update_stats(hdev);
594         if (status)
595                 dev_err(&hdev->pdev->dev,
596                         "Update MAC stats fail, status = %d.\n",
597                         status);
598
599         status = hclge_tqps_update_stats(handle);
600         if (status)
601                 dev_err(&hdev->pdev->dev,
602                         "Update TQPS stats fail, status = %d.\n",
603                         status);
604
605         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
606 }
607
608 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
609 {
610 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
611                 HNAE3_SUPPORT_PHY_LOOPBACK |\
612                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
613                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
614
615         struct hclge_vport *vport = hclge_get_vport(handle);
616         struct hclge_dev *hdev = vport->back;
617         int count = 0;
618
619         /* Loopback test support rules:
620          * mac: only GE mode is supported
621          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
622          * phy: only supported when a phy device exists on the board
623          */
624         if (stringset == ETH_SS_TEST) {
625                 /* clear loopback bit flags at first */
626                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
627                 if (hdev->pdev->revision >= 0x21 ||
628                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
629                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
630                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
631                         count += 1;
632                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
633                 }
634
635                 count += 2;
636                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
637                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
638         } else if (stringset == ETH_SS_STATS) {
639                 count = ARRAY_SIZE(g_mac_stats_string) +
640                         hclge_tqps_get_sset_count(handle, stringset);
641         }
642
643         return count;
644 }
645
646 static void hclge_get_strings(struct hnae3_handle *handle,
647                               u32 stringset,
648                               u8 *data)
649 {
650         u8 *p = data;
651         int size;
652
653         if (stringset == ETH_SS_STATS) {
654                 size = ARRAY_SIZE(g_mac_stats_string);
655                 p = hclge_comm_get_strings(stringset,
656                                            g_mac_stats_string,
657                                            size,
658                                            p);
659                 p = hclge_tqps_get_strings(handle, p);
660         } else if (stringset == ETH_SS_TEST) {
661                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
662                         memcpy(p,
663                                hns3_nic_test_strs[HNAE3_LOOP_APP],
664                                ETH_GSTRING_LEN);
665                         p += ETH_GSTRING_LEN;
666                 }
667                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
668                         memcpy(p,
669                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
670                                ETH_GSTRING_LEN);
671                         p += ETH_GSTRING_LEN;
672                 }
673                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
674                         memcpy(p,
675                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
676                                ETH_GSTRING_LEN);
677                         p += ETH_GSTRING_LEN;
678                 }
679                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
680                         memcpy(p,
681                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
682                                ETH_GSTRING_LEN);
683                         p += ETH_GSTRING_LEN;
684                 }
685         }
686 }
687
688 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
689 {
690         struct hclge_vport *vport = hclge_get_vport(handle);
691         struct hclge_dev *hdev = vport->back;
692         u64 *p;
693
694         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
695                                  g_mac_stats_string,
696                                  ARRAY_SIZE(g_mac_stats_string),
697                                  data);
698         p = hclge_tqps_get_stats(handle, p);
699 }
700
701 static int hclge_parse_func_status(struct hclge_dev *hdev,
702                                    struct hclge_func_status_cmd *status)
703 {
704         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
705                 return -EINVAL;
706
707         /* Set the pf to main pf */
708         if (status->pf_state & HCLGE_PF_STATE_MAIN)
709                 hdev->flag |= HCLGE_FLAG_MAIN;
710         else
711                 hdev->flag &= ~HCLGE_FLAG_MAIN;
712
713         return 0;
714 }
715
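/* Poll the function status command (up to 5 times, 1-2 ms apart) until the
 * firmware reports the PF state, then record whether this PF is the main PF.
 */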
716 static int hclge_query_function_status(struct hclge_dev *hdev)
717 {
718         struct hclge_func_status_cmd *req;
719         struct hclge_desc desc;
720         int timeout = 0;
721         int ret;
722
723         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
724         req = (struct hclge_func_status_cmd *)desc.data;
725
726         do {
727                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
728                 if (ret) {
729                         dev_err(&hdev->pdev->dev,
730                                 "query function status failed %d.\n",
731                                 ret);
732
733                         return ret;
734                 }
735
736                 /* Check if pf reset is done */
737                 if (req->pf_state)
738                         break;
739                 usleep_range(1000, 2000);
740         } while (timeout++ < 5);
741
742         ret = hclge_parse_func_status(hdev, req);
743
744         return ret;
745 }
746
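/* Read the PF's hardware resources from firmware: TQP count, packet/TX/DV
 * buffer sizes (rounded up to HCLGE_BUF_SIZE_UNIT), and the MSI-X vector
 * split between NIC and RoCE when RoCE is supported.
 */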
747 static int hclge_query_pf_resource(struct hclge_dev *hdev)
748 {
749         struct hclge_pf_res_cmd *req;
750         struct hclge_desc desc;
751         int ret;
752
753         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
754         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
755         if (ret) {
756                 dev_err(&hdev->pdev->dev,
757                         "query pf resource failed %d.\n", ret);
758                 return ret;
759         }
760
761         req = (struct hclge_pf_res_cmd *)desc.data;
762         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
763         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
764
765         if (req->tx_buf_size)
766                 hdev->tx_buf_size =
767                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
768         else
769                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
770
771         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
772
773         if (req->dv_buf_size)
774                 hdev->dv_buf_size =
775                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
776         else
777                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
778
779         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
780
781         if (hnae3_dev_roce_supported(hdev)) {
782                 hdev->roce_base_msix_offset =
783                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
784                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
785                 hdev->num_roce_msi =
786                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
787                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
788
789                 /* PF should have NIC vectors and Roce vectors,
790                  * NIC vectors are queued before Roce vectors.
791                  */
792                 hdev->num_msi = hdev->num_roce_msi +
793                                 hdev->roce_base_msix_offset;
794         } else {
795                 hdev->num_msi =
796                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
797                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
798         }
799
800         return 0;
801 }
802
803 static int hclge_parse_speed(int speed_cmd, int *speed)
804 {
805         switch (speed_cmd) {
806         case 6:
807                 *speed = HCLGE_MAC_SPEED_10M;
808                 break;
809         case 7:
810                 *speed = HCLGE_MAC_SPEED_100M;
811                 break;
812         case 0:
813                 *speed = HCLGE_MAC_SPEED_1G;
814                 break;
815         case 1:
816                 *speed = HCLGE_MAC_SPEED_10G;
817                 break;
818         case 2:
819                 *speed = HCLGE_MAC_SPEED_25G;
820                 break;
821         case 3:
822                 *speed = HCLGE_MAC_SPEED_40G;
823                 break;
824         case 4:
825                 *speed = HCLGE_MAC_SPEED_50G;
826                 break;
827         case 5:
828                 *speed = HCLGE_MAC_SPEED_100G;
829                 break;
830         default:
831                 return -EINVAL;
832         }
833
834         return 0;
835 }
836
837 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
838                                         u8 speed_ability)
839 {
840         unsigned long *supported = hdev->hw.mac.supported;
841
842         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
843                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
844                                  supported);
845
846         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
847                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
848                                  supported);
849
850         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
851                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
852                                  supported);
853
854         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
855                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
856                                  supported);
857
858         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
859                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
860                                  supported);
861
862         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
863         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
864 }
865
866 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
867                                          u8 speed_ability)
868 {
869         unsigned long *supported = hdev->hw.mac.supported;
870
871         /* default to support all speeds for GE port */
872         if (!speed_ability)
873                 speed_ability = HCLGE_SUPPORT_GE;
874
875         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
876                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
877                                  supported);
878
879         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
880                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
881                                  supported);
882                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
883                                  supported);
884         }
885
886         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
887                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
888                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
889         }
890
891         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
892         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
893         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
894 }
895
896 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
897 {
898         u8 media_type = hdev->hw.mac.media_type;
899
900         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
901                 hclge_parse_fiber_link_mode(hdev, speed_ability);
902         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
903                 hclge_parse_copper_link_mode(hdev, speed_ability);
904 }
905
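/* Decode the two HCLGE_OPC_GET_CFG_PARAM descriptors into struct hclge_cfg:
 * queue/TC/buffer parameters and the MAC address come from the first
 * descriptor; NUMA map, speed ability and UMV space from the second.
 */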
906 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
907 {
908         struct hclge_cfg_param_cmd *req;
909         u64 mac_addr_tmp_high;
910         u64 mac_addr_tmp;
911         int i;
912
913         req = (struct hclge_cfg_param_cmd *)desc[0].data;
914
915         /* get the configuration */
916         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
917                                               HCLGE_CFG_VMDQ_M,
918                                               HCLGE_CFG_VMDQ_S);
919         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
920                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
921         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
922                                             HCLGE_CFG_TQP_DESC_N_M,
923                                             HCLGE_CFG_TQP_DESC_N_S);
924
925         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
926                                         HCLGE_CFG_PHY_ADDR_M,
927                                         HCLGE_CFG_PHY_ADDR_S);
928         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
929                                           HCLGE_CFG_MEDIA_TP_M,
930                                           HCLGE_CFG_MEDIA_TP_S);
931         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
932                                           HCLGE_CFG_RX_BUF_LEN_M,
933                                           HCLGE_CFG_RX_BUF_LEN_S);
934         /* get mac_address */
935         mac_addr_tmp = __le32_to_cpu(req->param[2]);
936         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
937                                             HCLGE_CFG_MAC_ADDR_H_M,
938                                             HCLGE_CFG_MAC_ADDR_H_S);
939
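        /* param[3] carries the high bits of the MAC address; merge them above
         * the low 32 bits (the two-step shift avoids a single shift by 32).
         */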
940         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
941
942         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
943                                              HCLGE_CFG_DEFAULT_SPEED_M,
944                                              HCLGE_CFG_DEFAULT_SPEED_S);
945         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
946                                             HCLGE_CFG_RSS_SIZE_M,
947                                             HCLGE_CFG_RSS_SIZE_S);
948
949         for (i = 0; i < ETH_ALEN; i++)
950                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
951
952         req = (struct hclge_cfg_param_cmd *)desc[1].data;
953         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
954
955         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
956                                              HCLGE_CFG_SPEED_ABILITY_M,
957                                              HCLGE_CFG_SPEED_ABILITY_S);
958         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
959                                          HCLGE_CFG_UMV_TBL_SPACE_M,
960                                          HCLGE_CFG_UMV_TBL_SPACE_S);
961         if (!cfg->umv_space)
962                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
963 }
964
965 /* hclge_get_cfg: query the static parameters from flash
966  * @hdev: pointer to struct hclge_dev
967  * @hcfg: the config structure to be filled
968  */
969 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
970 {
971         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
972         struct hclge_cfg_param_cmd *req;
973         int i, ret;
974
975         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
976                 u32 offset = 0;
977
978                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
979                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
980                                            true);
981                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
982                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
983                 /* Len should be in units of 4 bytes when sent to hardware */
984                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
985                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
986                 req->offset = cpu_to_le32(offset);
987         }
988
989         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
990         if (ret) {
991                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
992                 return ret;
993         }
994
995         hclge_parse_cfg(hcfg, desc);
996
997         return 0;
998 }
999
1000 static int hclge_get_cap(struct hclge_dev *hdev)
1001 {
1002         int ret;
1003
1004         ret = hclge_query_function_status(hdev);
1005         if (ret) {
1006                 dev_err(&hdev->pdev->dev,
1007                         "query function status error %d.\n", ret);
1008                 return ret;
1009         }
1010
1011         /* get pf resource */
1012         ret = hclge_query_pf_resource(hdev);
1013         if (ret)
1014                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1015
1016         return ret;
1017 }
1018
1019 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1020 {
1021 #define HCLGE_MIN_TX_DESC       64
1022 #define HCLGE_MIN_RX_DESC       64
1023
1024         if (!is_kdump_kernel())
1025                 return;
1026
1027         dev_info(&hdev->pdev->dev,
1028                  "Running kdump kernel. Using minimal resources\n");
1029
1030         /* minimum number of queue pairs equals the number of vports */
1031         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1032         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1033         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1034 }
1035
1036 static int hclge_configure(struct hclge_dev *hdev)
1037 {
1038         struct hclge_cfg cfg;
1039         int ret, i;
1040
1041         ret = hclge_get_cfg(hdev, &cfg);
1042         if (ret) {
1043                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1044                 return ret;
1045         }
1046
1047         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1048         hdev->base_tqp_pid = 0;
1049         hdev->rss_size_max = cfg.rss_size_max;
1050         hdev->rx_buf_len = cfg.rx_buf_len;
1051         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1052         hdev->hw.mac.media_type = cfg.media_type;
1053         hdev->hw.mac.phy_addr = cfg.phy_addr;
1054         hdev->num_tx_desc = cfg.tqp_desc_num;
1055         hdev->num_rx_desc = cfg.tqp_desc_num;
1056         hdev->tm_info.num_pg = 1;
1057         hdev->tc_max = cfg.tc_num;
1058         hdev->tm_info.hw_pfc_map = 0;
1059         hdev->wanted_umv_size = cfg.umv_space;
1060
1061         if (hnae3_dev_fd_supported(hdev))
1062                 hdev->fd_en = true;
1063
1064         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1065         if (ret) {
1066                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1067                 return ret;
1068         }
1069
1070         hclge_parse_link_mode(hdev, cfg.speed_ability);
1071
1072         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1073             (hdev->tc_max < 1)) {
1074                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1075                          hdev->tc_max);
1076                 hdev->tc_max = 1;
1077         }
1078
1079         /* Dev does not support DCB */
1080         if (!hnae3_dev_dcb_supported(hdev)) {
1081                 hdev->tc_max = 1;
1082                 hdev->pfc_max = 0;
1083         } else {
1084                 hdev->pfc_max = hdev->tc_max;
1085         }
1086
1087         hdev->tm_info.num_tc = 1;
1088
1089         /* Currently does not support non-contiguous tc */
1090         for (i = 0; i < hdev->tm_info.num_tc; i++)
1091                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1092
1093         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1094
1095         hclge_init_kdump_kernel_config(hdev);
1096
1097         return ret;
1098 }
1099
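/* Program the TSO MSS minimum and maximum into hardware via
 * HCLGE_OPC_TSO_GENERIC_CONFIG.
 */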
1100 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1101                             int tso_mss_max)
1102 {
1103         struct hclge_cfg_tso_status_cmd *req;
1104         struct hclge_desc desc;
1105         u16 tso_mss;
1106
1107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1108
1109         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1110
1111         tso_mss = 0;
1112         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1113                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1114         req->tso_mss_min = cpu_to_le16(tso_mss);
1115
1116         tso_mss = 0;
1117         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1118                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1119         req->tso_mss_max = cpu_to_le16(tso_mss);
1120
1121         return hclge_cmd_send(&hdev->hw, &desc, 1);
1122 }
1123
1124 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1125 {
1126         struct hclge_cfg_gro_status_cmd *req;
1127         struct hclge_desc desc;
1128         int ret;
1129
1130         if (!hnae3_dev_gro_supported(hdev))
1131                 return 0;
1132
1133         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1134         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1135
1136         req->gro_en = cpu_to_le16(en ? 1 : 0);
1137
1138         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1139         if (ret)
1140                 dev_err(&hdev->pdev->dev,
1141                         "GRO hardware config cmd failed, ret = %d\n", ret);
1142
1143         return ret;
1144 }
1145
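/* Allocate one struct hclge_tqp per hardware queue pair and point each at its
 * register window (io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE).
 */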
1146 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1147 {
1148         struct hclge_tqp *tqp;
1149         int i;
1150
1151         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1152                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1153         if (!hdev->htqp)
1154                 return -ENOMEM;
1155
1156         tqp = hdev->htqp;
1157
1158         for (i = 0; i < hdev->num_tqps; i++) {
1159                 tqp->dev = &hdev->pdev->dev;
1160                 tqp->index = i;
1161
1162                 tqp->q.ae_algo = &ae_algo;
1163                 tqp->q.buf_size = hdev->rx_buf_len;
1164                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1165                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1166                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1167                         i * HCLGE_TQP_REG_SIZE;
1168
1169                 tqp++;
1170         }
1171
1172         return 0;
1173 }
1174
1175 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1176                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1177 {
1178         struct hclge_tqp_map_cmd *req;
1179         struct hclge_desc desc;
1180         int ret;
1181
1182         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1183
1184         req = (struct hclge_tqp_map_cmd *)desc.data;
1185         req->tqp_id = cpu_to_le16(tqp_pid);
1186         req->tqp_vf = func_id;
1187         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1188                         1 << HCLGE_TQP_MAP_EN_B;
1189         req->tqp_vid = cpu_to_le16(tqp_vid);
1190
1191         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1192         if (ret)
1193                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1194
1195         return ret;
1196 }
1197
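/* Hand out up to num_tqps still-unassigned hardware queues to this vport and
 * derive its RSS size from the per-TC queue count.
 */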
1198 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1199 {
1200         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1201         struct hclge_dev *hdev = vport->back;
1202         int i, alloced;
1203
1204         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1205              alloced < num_tqps; i++) {
1206                 if (!hdev->htqp[i].alloced) {
1207                         hdev->htqp[i].q.handle = &vport->nic;
1208                         hdev->htqp[i].q.tqp_index = alloced;
1209                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1210                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1211                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1212                         hdev->htqp[i].alloced = true;
1213                         alloced++;
1214                 }
1215         }
1216         vport->alloc_tqps = alloced;
1217         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1218                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1219
1220         return 0;
1221 }
1222
1223 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1224                             u16 num_tx_desc, u16 num_rx_desc)
1226 {
1227         struct hnae3_handle *nic = &vport->nic;
1228         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1229         struct hclge_dev *hdev = vport->back;
1230         int ret;
1231
1232         kinfo->num_tx_desc = num_tx_desc;
1233         kinfo->num_rx_desc = num_rx_desc;
1234
1235         kinfo->rx_buf_len = hdev->rx_buf_len;
1236
1237         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1238                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1239         if (!kinfo->tqp)
1240                 return -ENOMEM;
1241
1242         ret = hclge_assign_tqp(vport, num_tqps);
1243         if (ret)
1244                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1245
1246         return ret;
1247 }
1248
1249 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1250                                   struct hclge_vport *vport)
1251 {
1252         struct hnae3_handle *nic = &vport->nic;
1253         struct hnae3_knic_private_info *kinfo;
1254         u16 i;
1255
1256         kinfo = &nic->kinfo;
1257         for (i = 0; i < vport->alloc_tqps; i++) {
1258                 struct hclge_tqp *q =
1259                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1260                 bool is_pf;
1261                 int ret;
1262
1263                 is_pf = !(vport->vport_id);
1264                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1265                                              i, is_pf);
1266                 if (ret)
1267                         return ret;
1268         }
1269
1270         return 0;
1271 }
1272
1273 static int hclge_map_tqp(struct hclge_dev *hdev)
1274 {
1275         struct hclge_vport *vport = hdev->vport;
1276         u16 i, num_vport;
1277
1278         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1279         for (i = 0; i < num_vport; i++) {
1280                 int ret;
1281
1282                 ret = hclge_map_tqp_to_vport(hdev, vport);
1283                 if (ret)
1284                         return ret;
1285
1286                 vport++;
1287         }
1288
1289         return 0;
1290 }
1291
1292 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1293 {
1294         /* this would be initialized later */
1295 }
1296
1297 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1298 {
1299         struct hnae3_handle *nic = &vport->nic;
1300         struct hclge_dev *hdev = vport->back;
1301         int ret;
1302
1303         nic->pdev = hdev->pdev;
1304         nic->ae_algo = &ae_algo;
1305         nic->numa_node_mask = hdev->numa_node_mask;
1306
1307         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1308                 ret = hclge_knic_setup(vport, num_tqps,
1309                                        hdev->num_tx_desc, hdev->num_rx_desc);
1310
1311                 if (ret) {
1312                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1313                                 ret);
1314                         return ret;
1315                 }
1316         } else {
1317                 hclge_unic_setup(vport, num_tqps);
1318         }
1319
1320         return 0;
1321 }
1322
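/* Create one vport for the PF itself plus one per VMDq vport and requested
 * VF; queues are split evenly, with the remainder going to the main vport.
 */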
1323 static int hclge_alloc_vport(struct hclge_dev *hdev)
1324 {
1325         struct pci_dev *pdev = hdev->pdev;
1326         struct hclge_vport *vport;
1327         u32 tqp_main_vport;
1328         u32 tqp_per_vport;
1329         int num_vport, i;
1330         int ret;
1331
1332         /* We need to alloc a vport for main NIC of PF */
1333         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1334
1335         if (hdev->num_tqps < num_vport) {
1336                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1337                         hdev->num_tqps, num_vport);
1338                 return -EINVAL;
1339         }
1340
1341         /* Alloc the same number of TQPs for every vport */
1342         tqp_per_vport = hdev->num_tqps / num_vport;
1343         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1344
1345         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1346                              GFP_KERNEL);
1347         if (!vport)
1348                 return -ENOMEM;
1349
1350         hdev->vport = vport;
1351         hdev->num_alloc_vport = num_vport;
1352
1353         if (IS_ENABLED(CONFIG_PCI_IOV))
1354                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1355
1356         for (i = 0; i < num_vport; i++) {
1357                 vport->back = hdev;
1358                 vport->vport_id = i;
1359                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1360                 INIT_LIST_HEAD(&vport->vlan_list);
1361                 INIT_LIST_HEAD(&vport->uc_mac_list);
1362                 INIT_LIST_HEAD(&vport->mc_mac_list);
1363
1364                 if (i == 0)
1365                         ret = hclge_vport_setup(vport, tqp_main_vport);
1366                 else
1367                         ret = hclge_vport_setup(vport, tqp_per_vport);
1368                 if (ret) {
1369                         dev_err(&pdev->dev,
1370                                 "vport setup failed for vport %d, %d\n",
1371                                 i, ret);
1372                         return ret;
1373                 }
1374
1375                 vport++;
1376         }
1377
1378         return 0;
1379 }
1380
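/* A worked example of the TQP split above, assuming hdev->num_tqps = 16 and
 * num_vport = 5 (1 PF vport + 4 requested VFs, no VMDq vports; values chosen
 * for illustration only):
 *     tqp_per_vport  = 16 / 5 = 3
 *     tqp_main_vport = 3 + (16 % 5) = 4
 * so the PF's main vport (vport 0) absorbs the remainder and each of the
 * remaining vports gets 3 TQPs.
 */
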
1381 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1382                                     struct hclge_pkt_buf_alloc *buf_alloc)
1383 {
1384 /* TX buffer size is in units of 128 bytes */
1385 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1386 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1387         struct hclge_tx_buff_alloc_cmd *req;
1388         struct hclge_desc desc;
1389         int ret;
1390         u8 i;
1391
1392         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1393
1394         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1395         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1396                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1397
1398                 req->tx_pkt_buff[i] =
1399                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1400                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1401         }
1402
1403         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1404         if (ret)
1405                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1406                         ret);
1407
1408         return ret;
1409 }
1410
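/* A worked example of the field encoding above, assuming a tx_buf_size of
 * 0x8000 bytes (32 KiB, value chosen for illustration):
 *     (0x8000 >> HCLGE_BUF_SIZE_UNIT_SHIFT) | HCLGE_BUF_SIZE_UPDATE_EN_MSK
 *         = 0x0100 | 0x8000 = 0x8100
 * which is the 16-bit value written to req->tx_pkt_buff[i].
 */
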
1411 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1412                                  struct hclge_pkt_buf_alloc *buf_alloc)
1413 {
1414         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1415
1416         if (ret)
1417                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1418
1419         return ret;
1420 }
1421
1422 static int hclge_get_tc_num(struct hclge_dev *hdev)
1423 {
1424         int i, cnt = 0;
1425
1426         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1427                 if (hdev->hw_tc_map & BIT(i))
1428                         cnt++;
1429         return cnt;
1430 }
1431
1432 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1433 {
1434         int i, cnt = 0;
1435
1436         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1437                 if (hdev->hw_tc_map & BIT(i) &&
1438                     hdev->tm_info.hw_pfc_map & BIT(i))
1439                         cnt++;
1440         return cnt;
1441 }
1442
1443 /* Get the number of pfc enabled TCs, which have a private buffer */
1444 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1445                                   struct hclge_pkt_buf_alloc *buf_alloc)
1446 {
1447         struct hclge_priv_buf *priv;
1448         int i, cnt = 0;
1449
1450         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1451                 priv = &buf_alloc->priv_buf[i];
1452                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1453                     priv->enable)
1454                         cnt++;
1455         }
1456
1457         return cnt;
1458 }
1459
1460 /* Get the number of pfc disabled TCs, which have a private buffer */
1461 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1462                                      struct hclge_pkt_buf_alloc *buf_alloc)
1463 {
1464         struct hclge_priv_buf *priv;
1465         int i, cnt = 0;
1466
1467         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1468                 priv = &buf_alloc->priv_buf[i];
1469                 if (hdev->hw_tc_map & BIT(i) &&
1470                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1471                     priv->enable)
1472                         cnt++;
1473         }
1474
1475         return cnt;
1476 }
1477
1478 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1479 {
1480         struct hclge_priv_buf *priv;
1481         u32 rx_priv = 0;
1482         int i;
1483
1484         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1485                 priv = &buf_alloc->priv_buf[i];
1486                 if (priv->enable)
1487                         rx_priv += priv->buf_size;
1488         }
1489         return rx_priv;
1490 }
1491
1492 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1493 {
1494         u32 i, total_tx_size = 0;
1495
1496         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1497                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1498
1499         return total_tx_size;
1500 }
1501
1502 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1503                                 struct hclge_pkt_buf_alloc *buf_alloc,
1504                                 u32 rx_all)
1505 {
1506         u32 shared_buf_min, shared_buf_tc, shared_std;
1507         int tc_num, pfc_enable_num;
1508         u32 shared_buf, aligned_mps;
1509         u32 rx_priv;
1510         int i;
1511
1512         tc_num = hclge_get_tc_num(hdev);
1513         pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1514         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1515
1516         if (hnae3_dev_dcb_supported(hdev))
1517                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1518         else
1519                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1520                                         + hdev->dv_buf_size;
1521
1522         shared_buf_tc = pfc_enable_num * aligned_mps +
1523                         (tc_num - pfc_enable_num) * aligned_mps / 2 +
1524                         aligned_mps;
1525         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1526                              HCLGE_BUF_SIZE_UNIT);
1527
1528         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1529         if (rx_all < rx_priv + shared_std)
1530                 return false;
1531
1532         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1533         buf_alloc->s_buf.buf_size = shared_buf;
1534         if (hnae3_dev_dcb_supported(hdev)) {
1535                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1536                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1537                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1538         } else {
1539                 buf_alloc->s_buf.self.high = aligned_mps +
1540                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1541                 buf_alloc->s_buf.self.low =
1542                         roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1543         }
1544
1545         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1546                 if ((hdev->hw_tc_map & BIT(i)) &&
1547                     (hdev->tm_info.hw_pfc_map & BIT(i))) {
1548                         buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1549                         buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
1550                 } else {
1551                         buf_alloc->s_buf.tc_thrd[i].low = 0;
1552                         buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
1553                 }
1554         }
1555
1556         return true;
1557 }
1558
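/* A worked example of the shared buffer check above, assuming mps = 1500,
 * dv_buf_size = 2048, tc_num = 4 and pfc_enable_num = 2 on a DCB-capable
 * device (values chosen for illustration only):
 *     aligned_mps    = roundup(1500, 256) = 1536
 *     shared_buf_min = 2 * 1536 + 2048 = 5120
 *     shared_buf_tc  = 2 * 1536 + 2 * 1536 / 2 + 1536 = 6144
 *     shared_std     = roundup(max(5120, 6144), 256) = 6144
 * so rx_all must leave at least 6144 bytes after the private buffers for the
 * layout to be accepted.
 */
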
1559 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1560                                 struct hclge_pkt_buf_alloc *buf_alloc)
1561 {
1562         u32 i, total_size;
1563
1564         total_size = hdev->pkt_buf_size;
1565
1566         /* alloc tx buffer for all enabled tc */
1567         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1568                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1569
1570                 if (hdev->hw_tc_map & BIT(i)) {
1571                         if (total_size < hdev->tx_buf_size)
1572                                 return -ENOMEM;
1573
1574                         priv->tx_buf_size = hdev->tx_buf_size;
1575                 } else {
1576                         priv->tx_buf_size = 0;
1577                 }
1578
1579                 total_size -= priv->tx_buf_size;
1580         }
1581
1582         return 0;
1583 }
1584
1585 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1586                                   struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1589         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1590         int i;
1591
1592         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1593                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1594
1595                 priv->enable = 0;
1596                 priv->wl.low = 0;
1597                 priv->wl.high = 0;
1598                 priv->buf_size = 0;
1599
1600                 if (!(hdev->hw_tc_map & BIT(i)))
1601                         continue;
1602
1603                 priv->enable = 1;
1604
1605                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1606                         priv->wl.low = max ? aligned_mps : 256;
1607                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1608                                                 HCLGE_BUF_SIZE_UNIT);
1609                 } else {
1610                         priv->wl.low = 0;
1611                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1612                 }
1613
1614                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1615         }
1616
1617         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1618 }
1619
1620 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1621                                           struct hclge_pkt_buf_alloc *buf_alloc)
1622 {
1623         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1624         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1625         int i;
1626
1627         /* let the last one be cleared first */
1628         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1629                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1630
1631                 if (hdev->hw_tc_map & BIT(i) &&
1632                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1633                         /* Clear the no pfc TC private buffer */
1634                         priv->wl.low = 0;
1635                         priv->wl.high = 0;
1636                         priv->buf_size = 0;
1637                         priv->enable = 0;
1638                         no_pfc_priv_num--;
1639                 }
1640
1641                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1642                     no_pfc_priv_num == 0)
1643                         break;
1644         }
1645
1646         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1647 }
1648
1649 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1650                                         struct hclge_pkt_buf_alloc *buf_alloc)
1651 {
1652         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1653         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1654         int i;
1655
1656         /* let the last one be cleared first */
1657         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1658                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1659
1660                 if (hdev->hw_tc_map & BIT(i) &&
1661                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1662                         /* Reduce the number of pfc TCs with a private buffer */
1663                         priv->wl.low = 0;
1664                         priv->enable = 0;
1665                         priv->wl.high = 0;
1666                         priv->buf_size = 0;
1667                         pfc_priv_num--;
1668                 }
1669
1670                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1671                     pfc_priv_num == 0)
1672                         break;
1673         }
1674
1675         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1676 }
1677
1678 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1679  * @hdev: pointer to struct hclge_dev
1680  * @buf_alloc: pointer to buffer calculation data
1681  * @return: 0: calculation successful, negative: fail
1682  */
1683 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1684                                 struct hclge_pkt_buf_alloc *buf_alloc)
1685 {
1686         /* When DCB is not supported, rx private buffer is not allocated. */
1687         if (!hnae3_dev_dcb_supported(hdev)) {
1688                 u32 rx_all = hdev->pkt_buf_size;
1689
1690                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1691                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1692                         return -ENOMEM;
1693
1694                 return 0;
1695         }
1696
1697         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1698                 return 0;
1699
1700         /* try to decrease the buffer size */
1701         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1702                 return 0;
1703
1704         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1705                 return 0;
1706
1707         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1708                 return 0;
1709
1710         return -ENOMEM;
1711 }
1712
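/* Summary of the fallback order above: the private buffers are first sized
 * with the larger watermarks (max == true); if the total does not fit, the
 * smaller watermarks are tried, then the private buffers of non-PFC TCs are
 * cleared from the last TC backwards, and finally the private buffers of
 * PFC TCs are cleared. Only when none of these layouts fits is -ENOMEM
 * returned.
 */
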
1713 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1714                                    struct hclge_pkt_buf_alloc *buf_alloc)
1715 {
1716         struct hclge_rx_priv_buff_cmd *req;
1717         struct hclge_desc desc;
1718         int ret;
1719         int i;
1720
1721         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1722         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1723
1724         /* Alloc private buffer TCs */
1725         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1727
1728                 req->buf_num[i] =
1729                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1730                 req->buf_num[i] |=
1731                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1732         }
1733
1734         req->shared_buf =
1735                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1736                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1737
1738         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1739         if (ret)
1740                 dev_err(&hdev->pdev->dev,
1741                         "rx private buffer alloc cmd failed %d\n", ret);
1742
1743         return ret;
1744 }
1745
1746 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1747                                    struct hclge_pkt_buf_alloc *buf_alloc)
1748 {
1749         struct hclge_rx_priv_wl_buf *req;
1750         struct hclge_priv_buf *priv;
1751         struct hclge_desc desc[2];
1752         int i, j;
1753         int ret;
1754
1755         for (i = 0; i < 2; i++) {
1756                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1757                                            false);
1758                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1759
1760                 /* The first descriptor sets the NEXT bit to 1 */
1761                 if (i == 0)
1762                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1763                 else
1764                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1765
1766                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1767                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1768
1769                         priv = &buf_alloc->priv_buf[idx];
1770                         req->tc_wl[j].high =
1771                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1772                         req->tc_wl[j].high |=
1773                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1774                         req->tc_wl[j].low =
1775                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1776                         req->tc_wl[j].low |=
1777                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1778                 }
1779         }
1780
1781         /* Send 2 descriptors at one time */
1782         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1783         if (ret)
1784                 dev_err(&hdev->pdev->dev,
1785                         "rx private waterline config cmd failed %d\n",
1786                         ret);
1787         return ret;
1788 }
1789
1790 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1791                                     struct hclge_pkt_buf_alloc *buf_alloc)
1792 {
1793         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1794         struct hclge_rx_com_thrd *req;
1795         struct hclge_desc desc[2];
1796         struct hclge_tc_thrd *tc;
1797         int i, j;
1798         int ret;
1799
1800         for (i = 0; i < 2; i++) {
1801                 hclge_cmd_setup_basic_desc(&desc[i],
1802                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1803                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1804
1805                 /* The first descriptor sets the NEXT bit to 1 */
1806                 if (i == 0)
1807                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1808                 else
1809                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1810
1811                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1812                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1813
1814                         req->com_thrd[j].high =
1815                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1816                         req->com_thrd[j].high |=
1817                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1818                         req->com_thrd[j].low =
1819                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1820                         req->com_thrd[j].low |=
1821                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1822                 }
1823         }
1824
1825         /* Send 2 descriptors at one time */
1826         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1827         if (ret)
1828                 dev_err(&hdev->pdev->dev,
1829                         "common threshold config cmd failed %d\n", ret);
1830         return ret;
1831 }
1832
1833 static int hclge_common_wl_config(struct hclge_dev *hdev,
1834                                   struct hclge_pkt_buf_alloc *buf_alloc)
1835 {
1836         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1837         struct hclge_rx_com_wl *req;
1838         struct hclge_desc desc;
1839         int ret;
1840
1841         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1842
1843         req = (struct hclge_rx_com_wl *)desc.data;
1844         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1845         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1846
1847         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1848         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1849
1850         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1851         if (ret)
1852                 dev_err(&hdev->pdev->dev,
1853                         "common waterline config cmd failed %d\n", ret);
1854
1855         return ret;
1856 }
1857
1858 int hclge_buffer_alloc(struct hclge_dev *hdev)
1859 {
1860         struct hclge_pkt_buf_alloc *pkt_buf;
1861         int ret;
1862
1863         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1864         if (!pkt_buf)
1865                 return -ENOMEM;
1866
1867         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1868         if (ret) {
1869                 dev_err(&hdev->pdev->dev,
1870                         "could not calc tx buffer size for all TCs %d\n", ret);
1871                 goto out;
1872         }
1873
1874         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1875         if (ret) {
1876                 dev_err(&hdev->pdev->dev,
1877                         "could not alloc tx buffers %d\n", ret);
1878                 goto out;
1879         }
1880
1881         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1882         if (ret) {
1883                 dev_err(&hdev->pdev->dev,
1884                         "could not calc rx priv buffer size for all TCs %d\n",
1885                         ret);
1886                 goto out;
1887         }
1888
1889         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1890         if (ret) {
1891                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1892                         ret);
1893                 goto out;
1894         }
1895
1896         if (hnae3_dev_dcb_supported(hdev)) {
1897                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1898                 if (ret) {
1899                         dev_err(&hdev->pdev->dev,
1900                                 "could not configure rx private waterline %d\n",
1901                                 ret);
1902                         goto out;
1903                 }
1904
1905                 ret = hclge_common_thrd_config(hdev, pkt_buf);
1906                 if (ret) {
1907                         dev_err(&hdev->pdev->dev,
1908                                 "could not configure common threshold %d\n",
1909                                 ret);
1910                         goto out;
1911                 }
1912         }
1913
1914         ret = hclge_common_wl_config(hdev, pkt_buf);
1915         if (ret)
1916                 dev_err(&hdev->pdev->dev,
1917                         "could not configure common waterline %d\n", ret);
1918
1919 out:
1920         kfree(pkt_buf);
1921         return ret;
1922 }
1923
1924 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1925 {
1926         struct hnae3_handle *roce = &vport->roce;
1927         struct hnae3_handle *nic = &vport->nic;
1928
1929         roce->rinfo.num_vectors = vport->back->num_roce_msi;
1930
1931         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1932             vport->back->num_msi_left == 0)
1933                 return -EINVAL;
1934
1935         roce->rinfo.base_vector = vport->back->roce_base_vector;
1936
1937         roce->rinfo.netdev = nic->kinfo.netdev;
1938         roce->rinfo.roce_io_base = vport->back->hw.io_base;
1939
1940         roce->pdev = nic->pdev;
1941         roce->ae_algo = nic->ae_algo;
1942         roce->numa_node_mask = nic->numa_node_mask;
1943
1944         return 0;
1945 }
1946
1947 static int hclge_init_msi(struct hclge_dev *hdev)
1948 {
1949         struct pci_dev *pdev = hdev->pdev;
1950         int vectors;
1951         int i;
1952
1953         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1954                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
1955         if (vectors < 0) {
1956                 dev_err(&pdev->dev,
1957                         "failed(%d) to allocate MSI/MSI-X vectors\n",
1958                         vectors);
1959                 return vectors;
1960         }
1961         if (vectors < hdev->num_msi)
1962                 dev_warn(&hdev->pdev->dev,
1963                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1964                          hdev->num_msi, vectors);
1965
1966         hdev->num_msi = vectors;
1967         hdev->num_msi_left = vectors;
1968         hdev->base_msi_vector = pdev->irq;
1969         hdev->roce_base_vector = hdev->base_msi_vector +
1970                                 hdev->roce_base_msix_offset;
1971
1972         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1973                                            sizeof(u16), GFP_KERNEL);
1974         if (!hdev->vector_status) {
1975                 pci_free_irq_vectors(pdev);
1976                 return -ENOMEM;
1977         }
1978
1979         for (i = 0; i < hdev->num_msi; i++)
1980                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1981
1982         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1983                                         sizeof(int), GFP_KERNEL);
1984         if (!hdev->vector_irq) {
1985                 pci_free_irq_vectors(pdev);
1986                 return -ENOMEM;
1987         }
1988
1989         return 0;
1990 }
1991
1992 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1993 {
1994
1995         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1996                 duplex = HCLGE_MAC_FULL;
1997
1998         return duplex;
1999 }
2000
2001 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2002                                       u8 duplex)
2003 {
2004         struct hclge_config_mac_speed_dup_cmd *req;
2005         struct hclge_desc desc;
2006         int ret;
2007
2008         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2009
2010         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2011
2012         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2013
2014         switch (speed) {
2015         case HCLGE_MAC_SPEED_10M:
2016                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2017                                 HCLGE_CFG_SPEED_S, 6);
2018                 break;
2019         case HCLGE_MAC_SPEED_100M:
2020                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2021                                 HCLGE_CFG_SPEED_S, 7);
2022                 break;
2023         case HCLGE_MAC_SPEED_1G:
2024                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2025                                 HCLGE_CFG_SPEED_S, 0);
2026                 break;
2027         case HCLGE_MAC_SPEED_10G:
2028                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2029                                 HCLGE_CFG_SPEED_S, 1);
2030                 break;
2031         case HCLGE_MAC_SPEED_25G:
2032                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2033                                 HCLGE_CFG_SPEED_S, 2);
2034                 break;
2035         case HCLGE_MAC_SPEED_40G:
2036                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2037                                 HCLGE_CFG_SPEED_S, 3);
2038                 break;
2039         case HCLGE_MAC_SPEED_50G:
2040                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2041                                 HCLGE_CFG_SPEED_S, 4);
2042                 break;
2043         case HCLGE_MAC_SPEED_100G:
2044                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2045                                 HCLGE_CFG_SPEED_S, 5);
2046                 break;
2047         default:
2048                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2049                 return -EINVAL;
2050         }
2051
2052         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2053                       1);
2054
2055         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev,
2058                         "mac speed/duplex config cmd failed %d.\n", ret);
2059                 return ret;
2060         }
2061
2062         return 0;
2063 }
2064
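/* Summary of the firmware speed encoding used above (the field value is not
 * ordered by speed):
 *     1G -> 0, 10G -> 1, 25G -> 2, 40G -> 3, 50G -> 4, 100G -> 5,
 *     10M -> 6, 100M -> 7
 * The duplex setting is carried separately in HCLGE_CFG_DUPLEX_B.
 */
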
2065 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2066 {
2067         int ret;
2068
2069         duplex = hclge_check_speed_dup(duplex, speed);
2070         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2071                 return 0;
2072
2073         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2074         if (ret)
2075                 return ret;
2076
2077         hdev->hw.mac.speed = speed;
2078         hdev->hw.mac.duplex = duplex;
2079
2080         return 0;
2081 }
2082
2083 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2084                                      u8 duplex)
2085 {
2086         struct hclge_vport *vport = hclge_get_vport(handle);
2087         struct hclge_dev *hdev = vport->back;
2088
2089         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2090 }
2091
2092 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2093 {
2094         struct hclge_config_auto_neg_cmd *req;
2095         struct hclge_desc desc;
2096         u32 flag = 0;
2097         int ret;
2098
2099         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2100
2101         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2102         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2103         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2104
2105         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2106         if (ret)
2107                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2108                         ret);
2109
2110         return ret;
2111 }
2112
2113 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2114 {
2115         struct hclge_vport *vport = hclge_get_vport(handle);
2116         struct hclge_dev *hdev = vport->back;
2117
2118         return hclge_set_autoneg_en(hdev, enable);
2119 }
2120
2121 static int hclge_get_autoneg(struct hnae3_handle *handle)
2122 {
2123         struct hclge_vport *vport = hclge_get_vport(handle);
2124         struct hclge_dev *hdev = vport->back;
2125         struct phy_device *phydev = hdev->hw.mac.phydev;
2126
2127         if (phydev)
2128                 return phydev->autoneg;
2129
2130         return hdev->hw.mac.autoneg;
2131 }
2132
2133 static int hclge_mac_init(struct hclge_dev *hdev)
2134 {
2135         struct hclge_mac *mac = &hdev->hw.mac;
2136         int ret;
2137
2138         hdev->support_sfp_query = true;
2139         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2140         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2141                                          hdev->hw.mac.duplex);
2142         if (ret) {
2143                 dev_err(&hdev->pdev->dev,
2144                         "Config mac speed dup fail ret=%d\n", ret);
2145                 return ret;
2146         }
2147
2148         mac->link = 0;
2149
2150         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2151         if (ret) {
2152                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2153                 return ret;
2154         }
2155
2156         ret = hclge_buffer_alloc(hdev);
2157         if (ret)
2158                 dev_err(&hdev->pdev->dev,
2159                         "allocate buffer fail, ret=%d\n", ret);
2160
2161         return ret;
2162 }
2163
2164 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2165 {
2166         if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2167                 schedule_work(&hdev->mbx_service_task);
2168 }
2169
2170 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2171 {
2172         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2173                 schedule_work(&hdev->rst_service_task);
2174 }
2175
2176 static void hclge_task_schedule(struct hclge_dev *hdev)
2177 {
2178         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2179             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2180             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2181                 (void)schedule_work(&hdev->service_task);
2182 }
2183
2184 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2185 {
2186         struct hclge_link_status_cmd *req;
2187         struct hclge_desc desc;
2188         int link_status;
2189         int ret;
2190
2191         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2192         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2193         if (ret) {
2194                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2195                         ret);
2196                 return ret;
2197         }
2198
2199         req = (struct hclge_link_status_cmd *)desc.data;
2200         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2201
2202         return !!link_status;
2203 }
2204
2205 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2206 {
2207         int mac_state;
2208         int link_stat;
2209
2210         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2211                 return 0;
2212
2213         mac_state = hclge_get_mac_link_status(hdev);
2214
2215         if (hdev->hw.mac.phydev) {
2216                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2217                         link_stat = mac_state &
2218                                 hdev->hw.mac.phydev->link;
2219                 else
2220                         link_stat = 0;
2221
2222         } else {
2223                 link_stat = mac_state;
2224         }
2225
2226         return !!link_stat;
2227 }
2228
2229 static void hclge_update_link_status(struct hclge_dev *hdev)
2230 {
2231         struct hnae3_client *rclient = hdev->roce_client;
2232         struct hnae3_client *client = hdev->nic_client;
2233         struct hnae3_handle *rhandle;
2234         struct hnae3_handle *handle;
2235         int state;
2236         int i;
2237
2238         if (!client)
2239                 return;
2240         state = hclge_get_mac_phy_link(hdev);
2241         if (state != hdev->hw.mac.link) {
2242                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2243                         handle = &hdev->vport[i].nic;
2244                         client->ops->link_status_change(handle, state);
2245                         rhandle = &hdev->vport[i].roce;
2246                         if (rclient && rclient->ops->link_status_change)
2247                                 rclient->ops->link_status_change(rhandle,
2248                                                                  state);
2249                 }
2250                 hdev->hw.mac.link = state;
2251         }
2252 }
2253
2254 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2255 {
2256         struct hclge_sfp_speed_cmd *resp = NULL;
2257         struct hclge_desc desc;
2258         int ret;
2259
2260         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2261         resp = (struct hclge_sfp_speed_cmd *)desc.data;
2262         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2263         if (ret == -EOPNOTSUPP) {
2264                 dev_warn(&hdev->pdev->dev,
2265                          "IMP does not support getting SFP speed %d\n", ret);
2266                 return ret;
2267         } else if (ret) {
2268                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2269                 return ret;
2270         }
2271
2272         *speed = resp->sfp_speed;
2273
2274         return 0;
2275 }
2276
2277 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2278 {
2279         struct hclge_mac mac = hdev->hw.mac;
2280         int speed;
2281         int ret;
2282
2283         /* get the speed from the SFP cmd when the phy
2284          * doesn't exist.
2285          */
2286         if (mac.phydev)
2287                 return 0;
2288
2289         /* if IMP does not support getting SFP/qSFP speed, return directly */
2290         if (!hdev->support_sfp_query)
2291                 return 0;
2292
2293         ret = hclge_get_sfp_speed(hdev, &speed);
2294         if (ret == -EOPNOTSUPP) {
2295                 hdev->support_sfp_query = false;
2296                 return ret;
2297         } else if (ret) {
2298                 return ret;
2299         }
2300
2301         if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2302                 return 0; /* do nothing if no SFP */
2303
2304         /* must config full duplex for SFP */
2305         return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2306 }
2307
2308 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2309 {
2310         struct hclge_vport *vport = hclge_get_vport(handle);
2311         struct hclge_dev *hdev = vport->back;
2312
2313         return hclge_update_speed_duplex(hdev);
2314 }
2315
2316 static int hclge_get_status(struct hnae3_handle *handle)
2317 {
2318         struct hclge_vport *vport = hclge_get_vport(handle);
2319         struct hclge_dev *hdev = vport->back;
2320
2321         hclge_update_link_status(hdev);
2322
2323         return hdev->hw.mac.link;
2324 }
2325
2326 static void hclge_service_timer(struct timer_list *t)
2327 {
2328         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2329
2330         mod_timer(&hdev->service_timer, jiffies + HZ);
2331         hdev->hw_stats.stats_timer++;
2332         hclge_task_schedule(hdev);
2333 }
2334
2335 static void hclge_service_complete(struct hclge_dev *hdev)
2336 {
2337         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2338
2339         /* Flush memory before next watchdog */
2340         smp_mb__before_atomic();
2341         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2342 }
2343
2344 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2345 {
2346         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2347
2348         /* fetch the events from their corresponding regs */
2349         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2350         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2351         msix_src_reg = hclge_read_dev(&hdev->hw,
2352                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2353
2354         /* Assumption: if by any chance reset and mailbox events are
2355          * reported together, we will only process the reset event in this
2356          * pass and defer the processing of the mailbox events. Since we
2357          * have not cleared the RX CMDQ event this time, we will receive
2358          * another interrupt from the H/W just for the mailbox.
2359          */
2360
2361         /* check for vector0 reset event sources */
2362         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2363                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2364                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2365                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2366                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2367                 return HCLGE_VECTOR0_EVENT_RST;
2368         }
2369
2370         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2371                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2372                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2373                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2374                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2375                 return HCLGE_VECTOR0_EVENT_RST;
2376         }
2377
2378         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2379                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2380                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2381                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2382                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2383                 return HCLGE_VECTOR0_EVENT_RST;
2384         }
2385
2386         /* check for vector0 msix event source */
2387         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2388                 return HCLGE_VECTOR0_EVENT_ERR;
2389
2390         /* check for vector0 mailbox(=CMDQ RX) event source */
2391         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2392                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2393                 *clearval = cmdq_src_reg;
2394                 return HCLGE_VECTOR0_EVENT_MBX;
2395         }
2396
2397         return HCLGE_VECTOR0_EVENT_OTHER;
2398 }
2399
2400 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2401                                     u32 regclr)
2402 {
2403         switch (event_type) {
2404         case HCLGE_VECTOR0_EVENT_RST:
2405                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2406                 break;
2407         case HCLGE_VECTOR0_EVENT_MBX:
2408                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2409                 break;
2410         default:
2411                 break;
2412         }
2413 }
2414
2415 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2416 {
2417         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2418                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2419                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2420                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2421         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2422 }
2423
2424 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2425 {
2426         writel(enable ? 1 : 0, vector->addr);
2427 }
2428
2429 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2430 {
2431         struct hclge_dev *hdev = data;
2432         u32 event_cause;
2433         u32 clearval;
2434
2435         hclge_enable_vector(&hdev->misc_vector, false);
2436         event_cause = hclge_check_event_cause(hdev, &clearval);
2437
2438         /* vector 0 interrupt is shared with reset and mailbox source events. */
2439         switch (event_cause) {
2440         case HCLGE_VECTOR0_EVENT_ERR:
2441                 /* we do not know what type of reset is required now. This can
2442                  * only be decided after we fetch the type of errors which
2443                  * caused this event. Therefore, we will do the following for now:
2444                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2445                  *    actual type of reset to be used is deferred.
2446                  * 2. Schedule the reset service task.
2447                  * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
2448                  *    will fetch the correct type of reset. This is done by
2449                  *    first decoding the types of errors.
2450                  */
2451                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2452                 /* fall through */
2453         case HCLGE_VECTOR0_EVENT_RST:
2454                 hclge_reset_task_schedule(hdev);
2455                 break;
2456         case HCLGE_VECTOR0_EVENT_MBX:
2457                 /* If we are here then either:
2458                  * 1. we are not handling any mbx task and none is
2459                  *    scheduled,
2460                  *                        OR
2461                  * 2. we are handling a mbx task but nothing more is
2462                  *    scheduled.
2463                  * In both cases we should schedule the mbx task, as this
2464                  * interrupt reports that more mbx messages are pending.
2465                  */
2466                 hclge_mbx_task_schedule(hdev);
2467                 break;
2468         default:
2469                 dev_warn(&hdev->pdev->dev,
2470                          "received unknown or unhandled event of vector0\n");
2471                 break;
2472         }
2473
2474         /* clear the source of interrupt if it is not caused by reset */
2475         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2476                 hclge_clear_event_cause(hdev, event_cause, clearval);
2477                 hclge_enable_vector(&hdev->misc_vector, true);
2478         }
2479
2480         return IRQ_HANDLED;
2481 }
2482
2483 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2484 {
2485         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2486                 dev_warn(&hdev->pdev->dev,
2487                          "vector(vector_id %d) has been freed.\n", vector_id);
2488                 return;
2489         }
2490
2491         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2492         hdev->num_msi_left += 1;
2493         hdev->num_msi_used -= 1;
2494 }
2495
2496 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2497 {
2498         struct hclge_misc_vector *vector = &hdev->misc_vector;
2499
2500         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2501
2502         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2503         hdev->vector_status[0] = 0;
2504
2505         hdev->num_msi_left -= 1;
2506         hdev->num_msi_used += 1;
2507 }
2508
2509 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2510 {
2511         int ret;
2512
2513         hclge_get_misc_vector(hdev);
2514
2515         /* this would be explicitly freed in hclge_misc_irq_uninit() */
2516         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2517                           0, "hclge_misc", hdev);
2518         if (ret) {
2519                 hclge_free_vector(hdev, 0);
2520                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2521                         hdev->misc_vector.vector_irq);
2522         }
2523
2524         return ret;
2525 }
2526
2527 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2528 {
2529         free_irq(hdev->misc_vector.vector_irq, hdev);
2530         hclge_free_vector(hdev, 0);
2531 }
2532
2533 int hclge_notify_client(struct hclge_dev *hdev,
2534                         enum hnae3_reset_notify_type type)
2535 {
2536         struct hnae3_client *client = hdev->nic_client;
2537         u16 i;
2538
2539         if (!client->ops->reset_notify)
2540                 return -EOPNOTSUPP;
2541
2542         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2543                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2544                 int ret;
2545
2546                 ret = client->ops->reset_notify(handle, type);
2547                 if (ret) {
2548                         dev_err(&hdev->pdev->dev,
2549                                 "notify nic client failed %d(%d)\n", type, ret);
2550                         return ret;
2551                 }
2552         }
2553
2554         return 0;
2555 }
2556
2557 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2558                                     enum hnae3_reset_notify_type type)
2559 {
2560         struct hnae3_client *client = hdev->roce_client;
2561         int ret = 0;
2562         u16 i;
2563
2564         if (!client)
2565                 return 0;
2566
2567         if (!client->ops->reset_notify)
2568                 return -EOPNOTSUPP;
2569
2570         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2571                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2572
2573                 ret = client->ops->reset_notify(handle, type);
2574                 if (ret) {
2575                         dev_err(&hdev->pdev->dev,
2576                                 "notify roce client failed %d(%d)",
2577                                 type, ret);
2578                         return ret;
2579                 }
2580         }
2581
2582         return ret;
2583 }
2584
2585 static int hclge_reset_wait(struct hclge_dev *hdev)
2586 {
2587 #define HCLGE_RESET_WAIT_MS     100
2588 #define HCLGE_RESET_WAIT_CNT    200
2589         u32 val, reg, reg_bit;
2590         u32 cnt = 0;
2591
2592         switch (hdev->reset_type) {
2593         case HNAE3_IMP_RESET:
2594                 reg = HCLGE_GLOBAL_RESET_REG;
2595                 reg_bit = HCLGE_IMP_RESET_BIT;
2596                 break;
2597         case HNAE3_GLOBAL_RESET:
2598                 reg = HCLGE_GLOBAL_RESET_REG;
2599                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2600                 break;
2601         case HNAE3_CORE_RESET:
2602                 reg = HCLGE_GLOBAL_RESET_REG;
2603                 reg_bit = HCLGE_CORE_RESET_BIT;
2604                 break;
2605         case HNAE3_FUNC_RESET:
2606                 reg = HCLGE_FUN_RST_ING;
2607                 reg_bit = HCLGE_FUN_RST_ING_B;
2608                 break;
2609         case HNAE3_FLR_RESET:
2610                 break;
2611         default:
2612                 dev_err(&hdev->pdev->dev,
2613                         "Wait for unsupported reset type: %d\n",
2614                         hdev->reset_type);
2615                 return -EINVAL;
2616         }
2617
2618         if (hdev->reset_type == HNAE3_FLR_RESET) {
2619                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2620                        cnt++ < HCLGE_RESET_WAIT_CNT)
2621                         msleep(HCLGE_RESET_WAIT_MS);
2622
2623                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2624                         dev_err(&hdev->pdev->dev,
2625                                 "flr wait timeout: %d\n", cnt);
2626                         return -EBUSY;
2627                 }
2628
2629                 return 0;
2630         }
2631
2632         val = hclge_read_dev(&hdev->hw, reg);
2633         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2634                 msleep(HCLGE_RESET_WAIT_MS);
2635                 val = hclge_read_dev(&hdev->hw, reg);
2636                 cnt++;
2637         }
2638
2639         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2640                 dev_warn(&hdev->pdev->dev,
2641                          "Wait for reset timeout: %d\n", hdev->reset_type);
2642                 return -EBUSY;
2643         }
2644
2645         return 0;
2646 }
2647
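/* With HCLGE_RESET_WAIT_MS = 100 and HCLGE_RESET_WAIT_CNT = 200, the polling
 * loops above wait for up to roughly 20 seconds before giving up with
 * -EBUSY.
 */
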
2648 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2649 {
2650         struct hclge_vf_rst_cmd *req;
2651         struct hclge_desc desc;
2652
2653         req = (struct hclge_vf_rst_cmd *)desc.data;
2654         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2655         req->dest_vfid = func_id;
2656
2657         if (reset)
2658                 req->vf_rst = 0x1;
2659
2660         return hclge_cmd_send(&hdev->hw, &desc, 1);
2661 }
2662
2663 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2664 {
2665         int i;
2666
2667         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2668                 struct hclge_vport *vport = &hdev->vport[i];
2669                 int ret;
2670
2671                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2672                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2673                 if (ret) {
2674                         dev_err(&hdev->pdev->dev,
2675                                 "set vf(%d) rst failed %d!\n",
2676                                 vport->vport_id, ret);
2677                         return ret;
2678                 }
2679
2680                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2681                         continue;
2682
2683                 /* Inform VF to process the reset.
2684                  * hclge_inform_reset_assert_to_vf may fail if VF
2685                  * driver is not loaded.
2686                  */
2687                 ret = hclge_inform_reset_assert_to_vf(vport);
2688                 if (ret)
2689                         dev_warn(&hdev->pdev->dev,
2690                                  "inform reset to vf(%d) failed %d!\n",
2691                                  vport->vport_id, ret);
2692         }
2693
2694         return 0;
2695 }
2696
2697 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2698 {
2699         struct hclge_desc desc;
2700         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2701         int ret;
2702
2703         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2704         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2705         req->fun_reset_vfid = func_id;
2706
2707         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2708         if (ret)
2709                 dev_err(&hdev->pdev->dev,
2710                         "send function reset cmd fail, status = %d\n", ret);
2711
2712         return ret;
2713 }
2714
2715 static void hclge_do_reset(struct hclge_dev *hdev)
2716 {
2717         struct pci_dev *pdev = hdev->pdev;
2718         u32 val;
2719
2720         switch (hdev->reset_type) {
2721         case HNAE3_GLOBAL_RESET:
2722                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2723                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2724                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2725                 dev_info(&pdev->dev, "Global Reset requested\n");
2726                 break;
2727         case HNAE3_CORE_RESET:
2728                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2729                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2730                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2731                 dev_info(&pdev->dev, "Core Reset requested\n");
2732                 break;
2733         case HNAE3_FUNC_RESET:
2734                 dev_info(&pdev->dev, "PF Reset requested\n");
2735                 /* schedule again to check later */
2736                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2737                 hclge_reset_task_schedule(hdev);
2738                 break;
2739         case HNAE3_FLR_RESET:
2740                 dev_info(&pdev->dev, "FLR requested\n");
2741                 /* schedule again to check later */
2742                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2743                 hclge_reset_task_schedule(hdev);
2744                 break;
2745         default:
2746                 dev_warn(&pdev->dev,
2747                          "Unsupported reset type: %d\n", hdev->reset_type);
2748                 break;
2749         }
2750 }
2751
2752 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2753                                                    unsigned long *addr)
2754 {
2755         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2756
2757         /* first, resolve any unknown reset type to the known type(s) */
2758         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2759                 /* we will intentionally ignore any errors from this function
2760                  * as we will end up in *some* reset request in any case
2761                  */
2762                 hclge_handle_hw_msix_error(hdev, addr);
2763                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2764                 /* We deferred the clearing of the error event which caused
2765                  * the interrupt, since it was not possible to do that in
2766                  * interrupt context (and this is the reason we introduced
2767                  * the new UNKNOWN reset type). Now that the errors have
2768                  * been handled and cleared in hardware, we can safely
2769                  * enable interrupts. This is an exception to the norm.
2770                  */
2771                 hclge_enable_vector(&hdev->misc_vector, true);
2772         }
2773
2774         /* return the highest priority reset level amongst all */
2775         if (test_bit(HNAE3_IMP_RESET, addr)) {
2776                 rst_level = HNAE3_IMP_RESET;
2777                 clear_bit(HNAE3_IMP_RESET, addr);
2778                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2779                 clear_bit(HNAE3_CORE_RESET, addr);
2780                 clear_bit(HNAE3_FUNC_RESET, addr);
2781         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2782                 rst_level = HNAE3_GLOBAL_RESET;
2783                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2784                 clear_bit(HNAE3_CORE_RESET, addr);
2785                 clear_bit(HNAE3_FUNC_RESET, addr);
2786         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2787                 rst_level = HNAE3_CORE_RESET;
2788                 clear_bit(HNAE3_CORE_RESET, addr);
2789                 clear_bit(HNAE3_FUNC_RESET, addr);
2790         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2791                 rst_level = HNAE3_FUNC_RESET;
2792                 clear_bit(HNAE3_FUNC_RESET, addr);
2793         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2794                 rst_level = HNAE3_FLR_RESET;
2795                 clear_bit(HNAE3_FLR_RESET, addr);
2796         }
2797
2798         return rst_level;
2799 }
2800
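/* An example of the priority resolution above: if both HNAE3_GLOBAL_RESET
 * and HNAE3_FUNC_RESET are pending in *addr, HNAE3_GLOBAL_RESET is returned
 * and both bits are cleared, since the higher-level reset subsumes the
 * lower-level one.
 */
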
2801 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2802 {
2803         u32 clearval = 0;
2804
2805         switch (hdev->reset_type) {
2806         case HNAE3_IMP_RESET:
2807                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2808                 break;
2809         case HNAE3_GLOBAL_RESET:
2810                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2811                 break;
2812         case HNAE3_CORE_RESET:
2813                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2814                 break;
2815         default:
2816                 break;
2817         }
2818
2819         if (!clearval)
2820                 return;
2821
2822         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2823         hclge_enable_vector(&hdev->misc_vector, true);
2824 }
2825
2826 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2827 {
2828         int ret = 0;
2829
2830         switch (hdev->reset_type) {
2831         case HNAE3_FUNC_RESET:
2832                 /* fall through */
2833         case HNAE3_FLR_RESET:
2834                 ret = hclge_set_all_vf_rst(hdev, true);
2835                 break;
2836         default:
2837                 break;
2838         }
2839
2840         return ret;
2841 }
2842
2843 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2844 {
2845         u32 reg_val;
2846         int ret = 0;
2847
2848         switch (hdev->reset_type) {
2849         case HNAE3_FUNC_RESET:
2850                 /* There is no mechanism for the PF to know if the VF has
2851                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
2852                  */
2853                 msleep(100);
2854                 ret = hclge_func_reset_cmd(hdev, 0);
2855                 if (ret) {
2856                         dev_err(&hdev->pdev->dev,
2857                                 "asserting function reset fail %d!\n", ret);
2858                         return ret;
2859                 }
2860
2861                 /* After performing PF reset, it is not necessary to do the
2862                  * mailbox handling or send any command to firmware, because
2863                  * any mailbox handling or command to firmware is only valid
2864                  * after hclge_cmd_init is called.
2865                  */
2866                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2867                 break;
2868         case HNAE3_FLR_RESET:
2869                 /* There is no mechanism for the PF to know if the VF has
2870                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
2871                  */
2872                 msleep(100);
2873                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2874                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2875                 break;
2876         case HNAE3_IMP_RESET:
2877                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2878                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2879                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2880                 break;
2881         default:
2882                 break;
2883         }
2884
2885         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2886
2887         return ret;
2888 }
2889
2890 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2891 {
2892 #define MAX_RESET_FAIL_CNT 5
2893 #define RESET_UPGRADE_DELAY_SEC 10
2894
2895         if (hdev->reset_pending) {
2896                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2897                          hdev->reset_pending);
2898                 return true;
2899         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2900                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2901                     BIT(HCLGE_IMP_RESET_BIT))) {
2902                 dev_info(&hdev->pdev->dev,
2903                          "reset failed because IMP Reset is pending\n");
2904                 hclge_clear_reset_cause(hdev);
2905                 return false;
2906         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2907                 hdev->reset_fail_cnt++;
2908                 if (is_timeout) {
2909                         set_bit(hdev->reset_type, &hdev->reset_pending);
2910                         dev_info(&hdev->pdev->dev,
2911                                  "re-schedule to wait for hw reset done\n");
2912                         return true;
2913                 }
2914
2915                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2916                 hclge_clear_reset_cause(hdev);
2917                 mod_timer(&hdev->reset_timer,
2918                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2919
2920                 return false;
2921         }
2922
2923         hclge_clear_reset_cause(hdev);
2924         dev_err(&hdev->pdev->dev, "Reset fail!\n");
2925         return false;
2926 }
2927
2928 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2929 {
2930         int ret = 0;
2931
2932         switch (hdev->reset_type) {
2933         case HNAE3_FUNC_RESET:
2934                 /* fall through */
2935         case HNAE3_FLR_RESET:
2936                 ret = hclge_set_all_vf_rst(hdev, false);
2937                 break;
2938         default:
2939                 break;
2940         }
2941
2942         return ret;
2943 }
2944
2945 static void hclge_reset(struct hclge_dev *hdev)
2946 {
2947         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2948         bool is_timeout = false;
2949         int ret;
2950
2951         /* Initialize ae_dev reset status as well, in case enet layer wants to
2952          * know if device is undergoing reset
2953          */
2954         ae_dev->reset_type = hdev->reset_type;
2955         hdev->reset_count++;
2956         /* perform reset of the stack & ae device for a client */
2957         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2958         if (ret)
2959                 goto err_reset;
2960
2961         ret = hclge_reset_prepare_down(hdev);
2962         if (ret)
2963                 goto err_reset;
2964
2965         rtnl_lock();
2966         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2967         if (ret)
2968                 goto err_reset_lock;
2969
2970         rtnl_unlock();
2971
2972         ret = hclge_reset_prepare_wait(hdev);
2973         if (ret)
2974                 goto err_reset;
2975
2976         if (hclge_reset_wait(hdev)) {
2977                 is_timeout = true;
2978                 goto err_reset;
2979         }
2980
2981         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2982         if (ret)
2983                 goto err_reset;
2984
2985         rtnl_lock();
2986         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2987         if (ret)
2988                 goto err_reset_lock;
2989
2990         ret = hclge_reset_ae_dev(hdev->ae_dev);
2991         if (ret)
2992                 goto err_reset_lock;
2993
2994         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2995         if (ret)
2996                 goto err_reset_lock;
2997
2998         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
2999         if (ret)
3000                 goto err_reset_lock;
3001
3002         hclge_clear_reset_cause(hdev);
3003
3004         ret = hclge_reset_prepare_up(hdev);
3005         if (ret)
3006                 goto err_reset_lock;
3007
3008         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3009         if (ret)
3010                 goto err_reset_lock;
3011
3012         rtnl_unlock();
3013
3014         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3015         if (ret)
3016                 goto err_reset;
3017
3018         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3019         if (ret)
3020                 goto err_reset;
3021
3022         hdev->last_reset_time = jiffies;
3023         hdev->reset_fail_cnt = 0;
3024         ae_dev->reset_type = HNAE3_NONE_RESET;
3025         del_timer(&hdev->reset_timer);
3026
3027         return;
3028
3029 err_reset_lock:
3030         rtnl_unlock();
3031 err_reset:
3032         if (hclge_reset_err_handle(hdev, is_timeout))
3033                 hclge_reset_task_schedule(hdev);
3034 }
3035
3036 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3037 {
3038         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3039         struct hclge_dev *hdev = ae_dev->priv;
3040
3041         /* We might end up getting called broadly because of two cases below:
3042          * 1. A recoverable error was conveyed through APEI and the only way
3043          *    to bring back normalcy is to reset.
3044          * 2. A new reset request from the stack due to timeout
3045          *
3046          * For the first case, the error event might not have an ae handle
3047          * available. Check if this is a new reset request and we are not
3048          * here just because the last reset attempt did not succeed and the
3049          * watchdog hit us again. We will know this if the last reset request
3050          * did not occur very recently (watchdog timer = 5 * HZ; we check
3051          * after a sufficiently large time, say 4 * 5 * HZ). For a new request
3052          * we reset the "reset level" to PF reset. If it is a repeat of the
3053          * most recent request, we throttle it: it is not allowed again
3054          * before 3 * HZ has passed.
3055          */
3056         if (!handle)
3057                 handle = &hdev->vport[0].nic;
3058
3059         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3060                 return;
3061         else if (hdev->default_reset_request)
3062                 hdev->reset_level =
3063                         hclge_get_reset_level(hdev,
3064                                               &hdev->default_reset_request);
3065         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3066                 hdev->reset_level = HNAE3_FUNC_RESET;
3067
3068         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3069                  hdev->reset_level);
3070
3071         /* request reset & schedule reset task */
3072         set_bit(hdev->reset_level, &hdev->reset_request);
3073         hclge_reset_task_schedule(hdev);
3074
3075         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3076                 hdev->reset_level++;
3077 }
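
/* Illustrative sketch (not part of the driver): the throttling logic in
 * hclge_reset_event() above ignores a request arriving less than 3 * HZ
 * jiffies after the previous reset, and only falls back to a fresh PF-level
 * reset once more than 4 * 5 * HZ jiffies (20 s) have passed.  The snippet
 * below models the two windows with plain integers; DEMO_HZ and the helper
 * names are hypothetical, and real jiffies wrap-around handling is omitted.
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_HZ 250UL	/* assumed tick rate for the sketch */

/* true when a new reset request should be honoured at all */
static bool demo_allow_reset(unsigned long now, unsigned long last)
{
	return now >= last + 3 * DEMO_HZ;	/* 3 * HZ throttle window */
}

/* true when the request counts as "new" rather than a watchdog repeat */
static bool demo_is_new_request(unsigned long now, unsigned long last)
{
	return now > last + 4 * 5 * DEMO_HZ;	/* 20 s since the last reset */
}

int main(void)
{
	unsigned long last = 1000, now = 1000 + 2 * DEMO_HZ;

	/* 2 s after the last reset: still throttled, and not a new request */
	printf("allow=%d new=%d\n", demo_allow_reset(now, last),
	       demo_is_new_request(now, last));
	return 0;
}
#endif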
3078
3079 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3080                                         enum hnae3_reset_type rst_type)
3081 {
3082         struct hclge_dev *hdev = ae_dev->priv;
3083
3084         set_bit(rst_type, &hdev->default_reset_request);
3085 }
3086
3087 static void hclge_reset_timer(struct timer_list *t)
3088 {
3089         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3090
3091         dev_info(&hdev->pdev->dev,
3092                  "triggering global reset in reset timer\n");
3093         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3094         hclge_reset_event(hdev->pdev, NULL);
3095 }
3096
3097 static void hclge_reset_subtask(struct hclge_dev *hdev)
3098 {
3099         /* check if there is any ongoing reset in the hardware. This status can
3100          * be checked from reset_pending. If there is, then we need to wait for
3101          * hardware to complete the reset.
3102          *    a. If we are able to figure out in reasonable time that the hardware
3103          *       has fully reset, then we can proceed with the driver and client
3104          *       reset.
3105          *    b. Else, we can come back later to check this status, so reschedule
3106          *       now.
3107          */
3108         hdev->last_reset_time = jiffies;
3109         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3110         if (hdev->reset_type != HNAE3_NONE_RESET)
3111                 hclge_reset(hdev);
3112
3113         /* check if we got any *new* reset requests to be honored */
3114         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3115         if (hdev->reset_type != HNAE3_NONE_RESET)
3116                 hclge_do_reset(hdev);
3117
3118         hdev->reset_type = HNAE3_NONE_RESET;
3119 }
3120
3121 static void hclge_reset_service_task(struct work_struct *work)
3122 {
3123         struct hclge_dev *hdev =
3124                 container_of(work, struct hclge_dev, rst_service_task);
3125
3126         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3127                 return;
3128
3129         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3130
3131         hclge_reset_subtask(hdev);
3132
3133         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3134 }
3135
3136 static void hclge_mailbox_service_task(struct work_struct *work)
3137 {
3138         struct hclge_dev *hdev =
3139                 container_of(work, struct hclge_dev, mbx_service_task);
3140
3141         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3142                 return;
3143
3144         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3145
3146         hclge_mbx_handler(hdev);
3147
3148         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3149 }
3150
3151 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3152 {
3153         int i;
3154
3155         /* start from vport 1, because the PF (vport 0) is always alive */
3156         for (i = 1; i < hdev->num_alloc_vport; i++) {
3157                 struct hclge_vport *vport = &hdev->vport[i];
3158
3159                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3160                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3161
3162                 /* If the vf is not alive, reset its mps to the default value */
3163                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3164                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3165         }
3166 }
3167
3168 static void hclge_service_task(struct work_struct *work)
3169 {
3170         struct hclge_dev *hdev =
3171                 container_of(work, struct hclge_dev, service_task);
3172
3173         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3174                 hclge_update_stats_for_all(hdev);
3175                 hdev->hw_stats.stats_timer = 0;
3176         }
3177
3178         hclge_update_speed_duplex(hdev);
3179         hclge_update_link_status(hdev);
3180         hclge_update_vport_alive(hdev);
3181         hclge_service_complete(hdev);
3182 }
3183
3184 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3185 {
3186         /* VF handle has no client */
3187         if (!handle->client)
3188                 return container_of(handle, struct hclge_vport, nic);
3189         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3190                 return container_of(handle, struct hclge_vport, roce);
3191         else
3192                 return container_of(handle, struct hclge_vport, nic);
3193 }
3194
3195 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3196                             struct hnae3_vector_info *vector_info)
3197 {
3198         struct hclge_vport *vport = hclge_get_vport(handle);
3199         struct hnae3_vector_info *vector = vector_info;
3200         struct hclge_dev *hdev = vport->back;
3201         int alloc = 0;
3202         int i, j;
3203
3204         vector_num = min(hdev->num_msi_left, vector_num);
3205
3206         for (j = 0; j < vector_num; j++) {
3207                 for (i = 1; i < hdev->num_msi; i++) {
3208                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3209                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3210                                 vector->io_addr = hdev->hw.io_base +
3211                                         HCLGE_VECTOR_REG_BASE +
3212                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3213                                         vport->vport_id *
3214                                         HCLGE_VECTOR_VF_OFFSET;
3215                                 hdev->vector_status[i] = vport->vport_id;
3216                                 hdev->vector_irq[i] = vector->vector;
3217
3218                                 vector++;
3219                                 alloc++;
3220
3221                                 break;
3222                         }
3223                 }
3224         }
3225         hdev->num_msi_left -= alloc;
3226         hdev->num_msi_used += alloc;
3227
3228         return alloc;
3229 }
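
/* Illustrative sketch (not part of the driver): in hclge_get_vector() above,
 * the search starts at MSI-X entry 1 (entry 0 is kept for the misc vector)
 * and each claimed vector gets a doorbell address of the form
 *   io_base + HCLGE_VECTOR_REG_BASE + (i - 1) * HCLGE_VECTOR_REG_OFFSET
 *           + vport_id * HCLGE_VECTOR_VF_OFFSET.
 * Only the shape of that arithmetic is taken from the code; the constant
 * values below are hypothetical placeholders.
 */
#if 0	/* example only */
#include <stdio.h>

#define DEMO_VECTOR_REG_BASE	0x20000UL
#define DEMO_VECTOR_REG_OFFSET	0x4UL
#define DEMO_VECTOR_VF_OFFSET	0x100000UL

static unsigned long demo_vector_io_off(int i, int vport_id)
{
	return DEMO_VECTOR_REG_BASE +
	       (unsigned long)(i - 1) * DEMO_VECTOR_REG_OFFSET +
	       (unsigned long)vport_id * DEMO_VECTOR_VF_OFFSET;
}

int main(void)
{
	/* register offset for MSI-X entry 3 of vport 2 */
	printf("offset=%#lx\n", demo_vector_io_off(3, 2));
	return 0;
}
#endif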
3230
3231 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3232 {
3233         int i;
3234
3235         for (i = 0; i < hdev->num_msi; i++)
3236                 if (vector == hdev->vector_irq[i])
3237                         return i;
3238
3239         return -EINVAL;
3240 }
3241
3242 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3243 {
3244         struct hclge_vport *vport = hclge_get_vport(handle);
3245         struct hclge_dev *hdev = vport->back;
3246         int vector_id;
3247
3248         vector_id = hclge_get_vector_index(hdev, vector);
3249         if (vector_id < 0) {
3250                 dev_err(&hdev->pdev->dev,
3251                         "Get vector index fail. vector_id =%d\n", vector_id);
3252                 return vector_id;
3253         }
3254
3255         hclge_free_vector(hdev, vector_id);
3256
3257         return 0;
3258 }
3259
3260 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3261 {
3262         return HCLGE_RSS_KEY_SIZE;
3263 }
3264
3265 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3266 {
3267         return HCLGE_RSS_IND_TBL_SIZE;
3268 }
3269
3270 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3271                                   const u8 hfunc, const u8 *key)
3272 {
3273         struct hclge_rss_config_cmd *req;
3274         struct hclge_desc desc;
3275         int key_offset;
3276         int key_size;
3277         int ret;
3278
3279         req = (struct hclge_rss_config_cmd *)desc.data;
3280
3281         for (key_offset = 0; key_offset < 3; key_offset++) {
3282                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3283                                            false);
3284
3285                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3286                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3287
3288                 if (key_offset == 2)
3289                         key_size = HCLGE_RSS_KEY_SIZE -
3290                                    HCLGE_RSS_HASH_KEY_NUM * 2;
3291                 else
3292                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3293
3294                 memcpy(req->hash_key,
3295                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3296
3297                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3298                 if (ret) {
3299                         dev_err(&hdev->pdev->dev,
3300                                 "Configure RSS config fail, status = %d\n",
3301                                 ret);
3302                         return ret;
3303                 }
3304         }
3305         return 0;
3306 }
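
/* Illustrative sketch (not part of the driver): hclge_set_rss_algo_key()
 * above streams the RSS hash key to firmware in three descriptors, each
 * carrying HCLGE_RSS_HASH_KEY_NUM bytes except the last, which carries the
 * remainder.  Assuming the usual 40-byte key and 16-byte chunks, the split
 * is 16 + 16 + 8; the program below reproduces only that chunking
 * arithmetic, and the DEMO_* sizes are assumptions.
 */
#if 0	/* example only */
#include <stdio.h>
#include <string.h>

#define DEMO_KEY_SIZE	40
#define DEMO_CHUNK	16

int main(void)
{
	unsigned char key[DEMO_KEY_SIZE] = { 0 };
	unsigned char chunk[DEMO_CHUNK];
	int off;

	for (off = 0; off < 3; off++) {
		int size = (off == 2) ? DEMO_KEY_SIZE - 2 * DEMO_CHUNK
				      : DEMO_CHUNK;

		/* copy the slice that would go into descriptor 'off' */
		memcpy(chunk, key + off * DEMO_CHUNK, size);
		printf("desc %d carries %d bytes\n", off, size);
	}
	return 0;
}
#endif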
3307
3308 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3309 {
3310         struct hclge_rss_indirection_table_cmd *req;
3311         struct hclge_desc desc;
3312         int i, j;
3313         int ret;
3314
3315         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3316
3317         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3318                 hclge_cmd_setup_basic_desc
3319                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3320
3321                 req->start_table_index =
3322                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3323                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3324
3325                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3326                         req->rss_result[j] =
3327                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3328
3329                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3330                 if (ret) {
3331                         dev_err(&hdev->pdev->dev,
3332                                 "Configure rss indir table fail, status = %d\n",
3333                                 ret);
3334                         return ret;
3335                 }
3336         }
3337         return 0;
3338 }
3339
3340 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3341                                  u16 *tc_size, u16 *tc_offset)
3342 {
3343         struct hclge_rss_tc_mode_cmd *req;
3344         struct hclge_desc desc;
3345         int ret;
3346         int i;
3347
3348         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3349         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3350
3351         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3352                 u16 mode = 0;
3353
3354                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3355                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3356                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3357                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3358                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3359
3360                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3361         }
3362
3363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3364         if (ret)
3365                 dev_err(&hdev->pdev->dev,
3366                         "Configure rss tc mode fail, status = %d\n", ret);
3367
3368         return ret;
3369 }
3370
3371 static void hclge_get_rss_type(struct hclge_vport *vport)
3372 {
3373         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3374             vport->rss_tuple_sets.ipv4_udp_en ||
3375             vport->rss_tuple_sets.ipv4_sctp_en ||
3376             vport->rss_tuple_sets.ipv6_tcp_en ||
3377             vport->rss_tuple_sets.ipv6_udp_en ||
3378             vport->rss_tuple_sets.ipv6_sctp_en)
3379                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3380         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3381                  vport->rss_tuple_sets.ipv6_fragment_en)
3382                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3383         else
3384                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3385 }
3386
3387 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3388 {
3389         struct hclge_rss_input_tuple_cmd *req;
3390         struct hclge_desc desc;
3391         int ret;
3392
3393         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3394
3395         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3396
3397         /* Get the tuple cfg from pf */
3398         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3399         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3400         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3401         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3402         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3403         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3404         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3405         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3406         hclge_get_rss_type(&hdev->vport[0]);
3407         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3408         if (ret)
3409                 dev_err(&hdev->pdev->dev,
3410                         "Configure rss input fail, status = %d\n", ret);
3411         return ret;
3412 }
3413
3414 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3415                          u8 *key, u8 *hfunc)
3416 {
3417         struct hclge_vport *vport = hclge_get_vport(handle);
3418         int i;
3419
3420         /* Get hash algorithm */
3421         if (hfunc) {
3422                 switch (vport->rss_algo) {
3423                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3424                         *hfunc = ETH_RSS_HASH_TOP;
3425                         break;
3426                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3427                         *hfunc = ETH_RSS_HASH_XOR;
3428                         break;
3429                 default:
3430                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3431                         break;
3432                 }
3433         }
3434
3435         /* Get the RSS Key required by the user */
3436         if (key)
3437                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3438
3439         /* Get indirect table */
3440         if (indir)
3441                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3442                         indir[i] =  vport->rss_indirection_tbl[i];
3443
3444         return 0;
3445 }
3446
3447 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3448                          const  u8 *key, const  u8 hfunc)
3449 {
3450         struct hclge_vport *vport = hclge_get_vport(handle);
3451         struct hclge_dev *hdev = vport->back;
3452         u8 hash_algo;
3453         int ret, i;
3454
3455         /* Set the RSS Hash Key if specified by the user */
3456         if (key) {
3457                 switch (hfunc) {
3458                 case ETH_RSS_HASH_TOP:
3459                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3460                         break;
3461                 case ETH_RSS_HASH_XOR:
3462                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3463                         break;
3464                 case ETH_RSS_HASH_NO_CHANGE:
3465                         hash_algo = vport->rss_algo;
3466                         break;
3467                 default:
3468                         return -EINVAL;
3469                 }
3470
3471                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3472                 if (ret)
3473                         return ret;
3474
3475                 /* Update the shadow RSS key with the user specified key */
3476                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3477                 vport->rss_algo = hash_algo;
3478         }
3479
3480         /* Update the shadow RSS table with user specified qids */
3481         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3482                 vport->rss_indirection_tbl[i] = indir[i];
3483
3484         /* Update the hardware */
3485         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3486 }
3487
3488 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3489 {
3490         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3491
3492         if (nfc->data & RXH_L4_B_2_3)
3493                 hash_sets |= HCLGE_D_PORT_BIT;
3494         else
3495                 hash_sets &= ~HCLGE_D_PORT_BIT;
3496
3497         if (nfc->data & RXH_IP_SRC)
3498                 hash_sets |= HCLGE_S_IP_BIT;
3499         else
3500                 hash_sets &= ~HCLGE_S_IP_BIT;
3501
3502         if (nfc->data & RXH_IP_DST)
3503                 hash_sets |= HCLGE_D_IP_BIT;
3504         else
3505                 hash_sets &= ~HCLGE_D_IP_BIT;
3506
3507         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3508                 hash_sets |= HCLGE_V_TAG_BIT;
3509
3510         return hash_sets;
3511 }
3512
3513 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3514                                struct ethtool_rxnfc *nfc)
3515 {
3516         struct hclge_vport *vport = hclge_get_vport(handle);
3517         struct hclge_dev *hdev = vport->back;
3518         struct hclge_rss_input_tuple_cmd *req;
3519         struct hclge_desc desc;
3520         u8 tuple_sets;
3521         int ret;
3522
3523         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3524                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3525                 return -EINVAL;
3526
3527         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3528         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3529
3530         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3531         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3532         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3533         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3534         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3535         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3536         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3537         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3538
3539         tuple_sets = hclge_get_rss_hash_bits(nfc);
3540         switch (nfc->flow_type) {
3541         case TCP_V4_FLOW:
3542                 req->ipv4_tcp_en = tuple_sets;
3543                 break;
3544         case TCP_V6_FLOW:
3545                 req->ipv6_tcp_en = tuple_sets;
3546                 break;
3547         case UDP_V4_FLOW:
3548                 req->ipv4_udp_en = tuple_sets;
3549                 break;
3550         case UDP_V6_FLOW:
3551                 req->ipv6_udp_en = tuple_sets;
3552                 break;
3553         case SCTP_V4_FLOW:
3554                 req->ipv4_sctp_en = tuple_sets;
3555                 break;
3556         case SCTP_V6_FLOW:
3557                 if ((nfc->data & RXH_L4_B_0_1) ||
3558                     (nfc->data & RXH_L4_B_2_3))
3559                         return -EINVAL;
3560
3561                 req->ipv6_sctp_en = tuple_sets;
3562                 break;
3563         case IPV4_FLOW:
3564                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3565                 break;
3566         case IPV6_FLOW:
3567                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3568                 break;
3569         default:
3570                 return -EINVAL;
3571         }
3572
3573         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3574         if (ret) {
3575                 dev_err(&hdev->pdev->dev,
3576                         "Set rss tuple fail, status = %d\n", ret);
3577                 return ret;
3578         }
3579
3580         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3581         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3582         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3583         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3584         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3585         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3586         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3587         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3588         hclge_get_rss_type(vport);
3589         return 0;
3590 }
3591
3592 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3593                                struct ethtool_rxnfc *nfc)
3594 {
3595         struct hclge_vport *vport = hclge_get_vport(handle);
3596         u8 tuple_sets;
3597
3598         nfc->data = 0;
3599
3600         switch (nfc->flow_type) {
3601         case TCP_V4_FLOW:
3602                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3603                 break;
3604         case UDP_V4_FLOW:
3605                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3606                 break;
3607         case TCP_V6_FLOW:
3608                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3609                 break;
3610         case UDP_V6_FLOW:
3611                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3612                 break;
3613         case SCTP_V4_FLOW:
3614                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3615                 break;
3616         case SCTP_V6_FLOW:
3617                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3618                 break;
3619         case IPV4_FLOW:
3620         case IPV6_FLOW:
3621                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3622                 break;
3623         default:
3624                 return -EINVAL;
3625         }
3626
3627         if (!tuple_sets)
3628                 return 0;
3629
3630         if (tuple_sets & HCLGE_D_PORT_BIT)
3631                 nfc->data |= RXH_L4_B_2_3;
3632         if (tuple_sets & HCLGE_S_PORT_BIT)
3633                 nfc->data |= RXH_L4_B_0_1;
3634         if (tuple_sets & HCLGE_D_IP_BIT)
3635                 nfc->data |= RXH_IP_DST;
3636         if (tuple_sets & HCLGE_S_IP_BIT)
3637                 nfc->data |= RXH_IP_SRC;
3638
3639         return 0;
3640 }
3641
3642 static int hclge_get_tc_size(struct hnae3_handle *handle)
3643 {
3644         struct hclge_vport *vport = hclge_get_vport(handle);
3645         struct hclge_dev *hdev = vport->back;
3646
3647         return hdev->rss_size_max;
3648 }
3649
3650 int hclge_rss_init_hw(struct hclge_dev *hdev)
3651 {
3652         struct hclge_vport *vport = hdev->vport;
3653         u8 *rss_indir = vport[0].rss_indirection_tbl;
3654         u16 rss_size = vport[0].alloc_rss_size;
3655         u8 *key = vport[0].rss_hash_key;
3656         u8 hfunc = vport[0].rss_algo;
3657         u16 tc_offset[HCLGE_MAX_TC_NUM];
3658         u16 tc_valid[HCLGE_MAX_TC_NUM];
3659         u16 tc_size[HCLGE_MAX_TC_NUM];
3660         u16 roundup_size;
3661         int i, ret;
3662
3663         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3664         if (ret)
3665                 return ret;
3666
3667         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3668         if (ret)
3669                 return ret;
3670
3671         ret = hclge_set_rss_input_tuple(hdev);
3672         if (ret)
3673                 return ret;
3674
3675         /* Each TC has the same queue size, and the tc_size set to hardware is
3676          * the log2 of the roundup power of two of rss_size; the actual queue
3677          * size is limited by the indirection table.
3678          */
3679         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3680                 dev_err(&hdev->pdev->dev,
3681                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3682                         rss_size);
3683                 return -EINVAL;
3684         }
3685
3686         roundup_size = roundup_pow_of_two(rss_size);
3687         roundup_size = ilog2(roundup_size);
3688
3689         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3690                 tc_valid[i] = 0;
3691
3692                 if (!(hdev->hw_tc_map & BIT(i)))
3693                         continue;
3694
3695                 tc_valid[i] = 1;
3696                 tc_size[i] = roundup_size;
3697                 tc_offset[i] = rss_size * i;
3698         }
3699
3700         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3701 }
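
/* Illustrative sketch (not part of the driver): hclge_rss_init_hw() above
 * programs, for every enabled TC, tc_size = ilog2(roundup_pow_of_two(rss_size))
 * and tc_offset = rss_size * tc.  The standalone helpers below redo that
 * arithmetic for a sample rss_size of 24; the demo_* helpers simply
 * reimplement the kernel's roundup_pow_of_two()/ilog2() for the sketch.
 */
#if 0	/* example only */
#include <stdio.h>

static unsigned int demo_roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int demo_ilog2(unsigned int n)
{
	unsigned int l = 0;

	while (n > 1) {
		n >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	unsigned int rss_size = 24, tc;
	unsigned int tc_size = demo_ilog2(demo_roundup_pow_of_two(rss_size));

	for (tc = 0; tc < 4; tc++)	/* assume four enabled TCs */
		printf("tc %u: size(log2)=%u offset=%u\n",
		       tc, tc_size, rss_size * tc);
	return 0;
}
#endif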
3702
3703 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3704 {
3705         struct hclge_vport *vport = hdev->vport;
3706         int i, j;
3707
3708         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3709                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3710                         vport[j].rss_indirection_tbl[i] =
3711                                 i % vport[j].alloc_rss_size;
3712         }
3713 }
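
/* Illustrative sketch (not part of the driver): the loop above simply spreads
 * the RSS queues round-robin over the indirection table, i.e. entry i maps to
 * queue (i % alloc_rss_size).  The table and queue counts below are assumed
 * values for the demo, not the driver's actual defines.
 */
#if 0	/* example only */
#include <stdio.h>

#define DEMO_IND_TBL_SIZE	512
#define DEMO_RSS_SIZE		8

int main(void)
{
	unsigned char tbl[DEMO_IND_TBL_SIZE];
	int i;

	for (i = 0; i < DEMO_IND_TBL_SIZE; i++)
		tbl[i] = i % DEMO_RSS_SIZE;	/* queues 0..7 repeat */

	printf("tbl[0]=%d tbl[7]=%d tbl[8]=%d\n", tbl[0], tbl[7], tbl[8]);
	return 0;
}
#endif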
3714
3715 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3716 {
3717         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3718         struct hclge_vport *vport = hdev->vport;
3719
3720         if (hdev->pdev->revision >= 0x21)
3721                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3722
3723         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3724                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3725                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3726                 vport[i].rss_tuple_sets.ipv4_udp_en =
3727                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3728                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3729                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3730                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3731                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3732                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3733                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3734                 vport[i].rss_tuple_sets.ipv6_udp_en =
3735                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3736                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3737                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3738                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3739                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3740
3741                 vport[i].rss_algo = rss_algo;
3742
3743                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3744                        HCLGE_RSS_KEY_SIZE);
3745         }
3746
3747         hclge_rss_indir_init_cfg(hdev);
3748 }
3749
3750 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3751                                 int vector_id, bool en,
3752                                 struct hnae3_ring_chain_node *ring_chain)
3753 {
3754         struct hclge_dev *hdev = vport->back;
3755         struct hnae3_ring_chain_node *node;
3756         struct hclge_desc desc;
3757         struct hclge_ctrl_vector_chain_cmd *req
3758                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3759         enum hclge_cmd_status status;
3760         enum hclge_opcode_type op;
3761         u16 tqp_type_and_id;
3762         int i;
3763
3764         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3765         hclge_cmd_setup_basic_desc(&desc, op, false);
3766         req->int_vector_id = vector_id;
3767
3768         i = 0;
3769         for (node = ring_chain; node; node = node->next) {
3770                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3771                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3772                                 HCLGE_INT_TYPE_S,
3773                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3774                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3775                                 HCLGE_TQP_ID_S, node->tqp_index);
3776                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3777                                 HCLGE_INT_GL_IDX_S,
3778                                 hnae3_get_field(node->int_gl_idx,
3779                                                 HNAE3_RING_GL_IDX_M,
3780                                                 HNAE3_RING_GL_IDX_S));
3781                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3782                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3783                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3784                         req->vfid = vport->vport_id;
3785
3786                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
3787                         if (status) {
3788                                 dev_err(&hdev->pdev->dev,
3789                                         "Map TQP fail, status is %d.\n",
3790                                         status);
3791                                 return -EIO;
3792                         }
3793                         i = 0;
3794
3795                         hclge_cmd_setup_basic_desc(&desc,
3796                                                    op,
3797                                                    false);
3798                         req->int_vector_id = vector_id;
3799                 }
3800         }
3801
3802         if (i > 0) {
3803                 req->int_cause_num = i;
3804                 req->vfid = vport->vport_id;
3805                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3806                 if (status) {
3807                         dev_err(&hdev->pdev->dev,
3808                                 "Map TQP fail, status is %d.\n", status);
3809                         return -EIO;
3810                 }
3811         }
3812
3813         return 0;
3814 }
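
/* Illustrative sketch (not part of the driver): hclge_bind_ring_with_vector()
 * above packs up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries into one
 * command, sends the command whenever the buffer fills, and flushes any
 * partial remainder at the end.  The snippet below shows that batch-and-flush
 * pattern with a hypothetical batch size and a stubbed send function.
 */
#if 0	/* example only */
#include <stdio.h>

#define DEMO_BATCH	4

static void demo_send(const int *buf, int n)
{
	printf("send %d entries starting with ring %d\n", n, buf[0]);
}

int main(void)
{
	int buf[DEMO_BATCH], i = 0, ring;

	for (ring = 0; ring < 10; ring++) {
		buf[i] = ring;
		if (++i >= DEMO_BATCH) {	/* buffer full: send now */
			demo_send(buf, i);
			i = 0;
		}
	}
	if (i > 0)				/* flush the partial tail */
		demo_send(buf, i);
	return 0;
}
#endif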
3815
3816 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3817                                     int vector,
3818                                     struct hnae3_ring_chain_node *ring_chain)
3819 {
3820         struct hclge_vport *vport = hclge_get_vport(handle);
3821         struct hclge_dev *hdev = vport->back;
3822         int vector_id;
3823
3824         vector_id = hclge_get_vector_index(hdev, vector);
3825         if (vector_id < 0) {
3826                 dev_err(&hdev->pdev->dev,
3827                         "Get vector index fail. vector_id =%d\n", vector_id);
3828                 return vector_id;
3829         }
3830
3831         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3832 }
3833
3834 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3835                                        int vector,
3836                                        struct hnae3_ring_chain_node *ring_chain)
3837 {
3838         struct hclge_vport *vport = hclge_get_vport(handle);
3839         struct hclge_dev *hdev = vport->back;
3840         int vector_id, ret;
3841
3842         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3843                 return 0;
3844
3845         vector_id = hclge_get_vector_index(hdev, vector);
3846         if (vector_id < 0) {
3847                 dev_err(&handle->pdev->dev,
3848                         "Get vector index fail. ret =%d\n", vector_id);
3849                 return vector_id;
3850         }
3851
3852         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3853         if (ret)
3854                 dev_err(&handle->pdev->dev,
3855                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3856                         vector_id,
3857                         ret);
3858
3859         return ret;
3860 }
3861
3862 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3863                                struct hclge_promisc_param *param)
3864 {
3865         struct hclge_promisc_cfg_cmd *req;
3866         struct hclge_desc desc;
3867         int ret;
3868
3869         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3870
3871         req = (struct hclge_promisc_cfg_cmd *)desc.data;
3872         req->vf_id = param->vf_id;
3873
3874         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3875          * pdev revision 0x20; newer revisions support them. Setting these
3876          * two fields does not return an error when the driver sends the
3877          * command to firmware on revision 0x20.
3878          */
3879         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3880                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3881
3882         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3883         if (ret)
3884                 dev_err(&hdev->pdev->dev,
3885                         "Set promisc mode fail, status is %d.\n", ret);
3886
3887         return ret;
3888 }
3889
3890 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3891                               bool en_mc, bool en_bc, int vport_id)
3892 {
3893         if (!param)
3894                 return;
3895
3896         memset(param, 0, sizeof(struct hclge_promisc_param));
3897         if (en_uc)
3898                 param->enable = HCLGE_PROMISC_EN_UC;
3899         if (en_mc)
3900                 param->enable |= HCLGE_PROMISC_EN_MC;
3901         if (en_bc)
3902                 param->enable |= HCLGE_PROMISC_EN_BC;
3903         param->vf_id = vport_id;
3904 }
3905
3906 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3907                                   bool en_mc_pmc)
3908 {
3909         struct hclge_vport *vport = hclge_get_vport(handle);
3910         struct hclge_dev *hdev = vport->back;
3911         struct hclge_promisc_param param;
3912         bool en_bc_pmc = true;
3913
3914         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3915          * is always bypassed. So broadcast promisc should be disabled until
3916          * the user enables promisc mode.
3917          */
3918         if (handle->pdev->revision == 0x20)
3919                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3920
3921         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3922                                  vport->vport_id);
3923         return hclge_cmd_set_promisc_mode(hdev, &param);
3924 }
3925
3926 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3927 {
3928         struct hclge_get_fd_mode_cmd *req;
3929         struct hclge_desc desc;
3930         int ret;
3931
3932         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3933
3934         req = (struct hclge_get_fd_mode_cmd *)desc.data;
3935
3936         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3937         if (ret) {
3938                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3939                 return ret;
3940         }
3941
3942         *fd_mode = req->mode;
3943
3944         return ret;
3945 }
3946
3947 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3948                                    u32 *stage1_entry_num,
3949                                    u32 *stage2_entry_num,
3950                                    u16 *stage1_counter_num,
3951                                    u16 *stage2_counter_num)
3952 {
3953         struct hclge_get_fd_allocation_cmd *req;
3954         struct hclge_desc desc;
3955         int ret;
3956
3957         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3958
3959         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3960
3961         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3962         if (ret) {
3963                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3964                         ret);
3965                 return ret;
3966         }
3967
3968         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3969         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3970         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3971         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3972
3973         return ret;
3974 }
3975
3976 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3977 {
3978         struct hclge_set_fd_key_config_cmd *req;
3979         struct hclge_fd_key_cfg *stage;
3980         struct hclge_desc desc;
3981         int ret;
3982
3983         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3984
3985         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3986         stage = &hdev->fd_cfg.key_cfg[stage_num];
3987         req->stage = stage_num;
3988         req->key_select = stage->key_sel;
3989         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3990         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3991         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3992         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3993         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3994         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3995
3996         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3997         if (ret)
3998                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3999
4000         return ret;
4001 }
4002
4003 static int hclge_init_fd_config(struct hclge_dev *hdev)
4004 {
4005 #define LOW_2_WORDS             0x03
4006         struct hclge_fd_key_cfg *key_cfg;
4007         int ret;
4008
4009         if (!hnae3_dev_fd_supported(hdev))
4010                 return 0;
4011
4012         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4013         if (ret)
4014                 return ret;
4015
4016         switch (hdev->fd_cfg.fd_mode) {
4017         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4018                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4019                 break;
4020         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4021                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4022                 break;
4023         default:
4024                 dev_err(&hdev->pdev->dev,
4025                         "Unsupported flow director mode %d\n",
4026                         hdev->fd_cfg.fd_mode);
4027                 return -EOPNOTSUPP;
4028         }
4029
4030         hdev->fd_cfg.proto_support =
4031                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4032                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4033         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4034         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4035         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4036         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4037         key_cfg->outer_sipv6_word_en = 0;
4038         key_cfg->outer_dipv6_word_en = 0;
4039
4040         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4041                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4042                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4043                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4044
4045         /* If using the max 400 bit key, we can also support tuples for ether type */
4046         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4047                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4048                 key_cfg->tuple_active |=
4049                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4050         }
4051
4052         /* roce_type is used to filter roce frames
4053          * dst_vport is used to specify the rule
4054          */
4055         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4056
4057         ret = hclge_get_fd_allocation(hdev,
4058                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4059                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4060                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4061                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4062         if (ret)
4063                 return ret;
4064
4065         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4066 }
4067
4068 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4069                                 int loc, u8 *key, bool is_add)
4070 {
4071         struct hclge_fd_tcam_config_1_cmd *req1;
4072         struct hclge_fd_tcam_config_2_cmd *req2;
4073         struct hclge_fd_tcam_config_3_cmd *req3;
4074         struct hclge_desc desc[3];
4075         int ret;
4076
4077         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4078         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4079         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4080         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4081         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4082
4083         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4084         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4085         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4086
4087         req1->stage = stage;
4088         req1->xy_sel = sel_x ? 1 : 0;
4089         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4090         req1->index = cpu_to_le32(loc);
4091         req1->entry_vld = sel_x ? is_add : 0;
4092
4093         if (key) {
4094                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4095                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4096                        sizeof(req2->tcam_data));
4097                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4098                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4099         }
4100
4101         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4102         if (ret)
4103                 dev_err(&hdev->pdev->dev,
4104                         "config tcam key fail, ret=%d\n",
4105                         ret);
4106
4107         return ret;
4108 }
4109
4110 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4111                               struct hclge_fd_ad_data *action)
4112 {
4113         struct hclge_fd_ad_config_cmd *req;
4114         struct hclge_desc desc;
4115         u64 ad_data = 0;
4116         int ret;
4117
4118         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4119
4120         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4121         req->index = cpu_to_le32(loc);
4122         req->stage = stage;
4123
4124         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4125                       action->write_rule_id_to_bd);
4126         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4127                         action->rule_id);
4128         ad_data <<= 32;
4129         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4130         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4131                       action->forward_to_direct_queue);
4132         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4133                         action->queue_id);
4134         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4135         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4136                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4137         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4138         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4139                         action->counter_id);
4140
4141         req->ad_data = cpu_to_le64(ad_data);
4142         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4143         if (ret)
4144                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4145
4146         return ret;
4147 }
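
/* Illustrative sketch (not part of the driver): hclge_fd_ad_config() above
 * builds the 64-bit action word in two phases: the rule id is written into
 * the low 32 bits, the word is shifted up by 32, and the now-free low half
 * is filled with the drop/queue/counter action fields.  The program below
 * mimics that two-phase packing; the field positions used here are
 * hypothetical, not the real HCLGE_FD_AD_* layout.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ad = 0;
	uint16_t rule_id = 0x123;
	uint16_t queue_id = 7;

	ad |= rule_id;			/* phase 1: rule id in the low bits */
	ad <<= 32;			/* move it to the upper half */
	ad |= (uint64_t)queue_id << 1;	/* phase 2: demo queue field */
	ad |= 1ULL;			/* demo "forward to queue" flag */

	printf("ad_data=%#llx\n", (unsigned long long)ad);
	return 0;
}
#endif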
4148
4149 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4150                                    struct hclge_fd_rule *rule)
4151 {
4152         u16 tmp_x_s, tmp_y_s;
4153         u32 tmp_x_l, tmp_y_l;
4154         int i;
4155
4156         if (rule->unused_tuple & tuple_bit)
4157                 return true;
4158
4159         switch (tuple_bit) {
4160         case 0:
4161                 return false;
4162         case BIT(INNER_DST_MAC):
4163                 for (i = 0; i < 6; i++) {
4164                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4165                                rule->tuples_mask.dst_mac[i]);
4166                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4167                                rule->tuples_mask.dst_mac[i]);
4168                 }
4169
4170                 return true;
4171         case BIT(INNER_SRC_MAC):
4172                 for (i = 0; i < 6; i++) {
4173                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4174                                rule->tuples_mask.src_mac[i]);
4175                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4176                                rule->tuples_mask.src_mac[i]);
4177                 }
4178
4179                 return true;
4180         case BIT(INNER_VLAN_TAG_FST):
4181                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4182                        rule->tuples_mask.vlan_tag1);
4183                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4184                        rule->tuples_mask.vlan_tag1);
4185                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4186                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4187
4188                 return true;
4189         case BIT(INNER_ETH_TYPE):
4190                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4191                        rule->tuples_mask.ether_proto);
4192                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4193                        rule->tuples_mask.ether_proto);
4194                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4195                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4196
4197                 return true;
4198         case BIT(INNER_IP_TOS):
4199                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4200                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4201
4202                 return true;
4203         case BIT(INNER_IP_PROTO):
4204                 calc_x(*key_x, rule->tuples.ip_proto,
4205                        rule->tuples_mask.ip_proto);
4206                 calc_y(*key_y, rule->tuples.ip_proto,
4207                        rule->tuples_mask.ip_proto);
4208
4209                 return true;
4210         case BIT(INNER_SRC_IP):
4211                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4212                        rule->tuples_mask.src_ip[3]);
4213                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4214                        rule->tuples_mask.src_ip[3]);
4215                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4216                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4217
4218                 return true;
4219         case BIT(INNER_DST_IP):
4220                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4221                        rule->tuples_mask.dst_ip[3]);
4222                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4223                        rule->tuples_mask.dst_ip[3]);
4224                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4225                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4226
4227                 return true;
4228         case BIT(INNER_SRC_PORT):
4229                 calc_x(tmp_x_s, rule->tuples.src_port,
4230                        rule->tuples_mask.src_port);
4231                 calc_y(tmp_y_s, rule->tuples.src_port,
4232                        rule->tuples_mask.src_port);
4233                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4234                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4235
4236                 return true;
4237         case BIT(INNER_DST_PORT):
4238                 calc_x(tmp_x_s, rule->tuples.dst_port,
4239                        rule->tuples_mask.dst_port);
4240                 calc_y(tmp_y_s, rule->tuples.dst_port,
4241                        rule->tuples_mask.dst_port);
4242                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4243                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4244
4245                 return true;
4246         default:
4247                 return false;
4248         }
4249 }
4250
4251 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4252                                  u8 vf_id, u8 network_port_id)
4253 {
4254         u32 port_number = 0;
4255
4256         if (port_type == HOST_PORT) {
4257                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4258                                 pf_id);
4259                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4260                                 vf_id);
4261                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4262         } else {
4263                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4264                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4265                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4266         }
4267
4268         return port_number;
4269 }
4270
4271 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4272                                        __le32 *key_x, __le32 *key_y,
4273                                        struct hclge_fd_rule *rule)
4274 {
4275         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4276         u8 cur_pos = 0, tuple_size, shift_bits;
4277         int i;
4278
4279         for (i = 0; i < MAX_META_DATA; i++) {
4280                 tuple_size = meta_data_key_info[i].key_length;
4281                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4282
4283                 switch (tuple_bit) {
4284                 case BIT(ROCE_TYPE):
4285                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4286                         cur_pos += tuple_size;
4287                         break;
4288                 case BIT(DST_VPORT):
4289                         port_number = hclge_get_port_number(HOST_PORT, 0,
4290                                                             rule->vf_id, 0);
4291                         hnae3_set_field(meta_data,
4292                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4293                                         cur_pos, port_number);
4294                         cur_pos += tuple_size;
4295                         break;
4296                 default:
4297                         break;
4298                 }
4299         }
4300
4301         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4302         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4303         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4304
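        /* Illustrative example: meta_data is left-aligned within the 32-bit
         * key word, so if the active meta data fields occupy 12 bits,
         * shift_bits is 32 - 12 = 20 and the used bits land in the MSBs
         * while the unused LSBs stay zero.
         */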
4305         *key_x = cpu_to_le32(tmp_x << shift_bits);
4306         *key_y = cpu_to_le32(tmp_y << shift_bits);
4307 }
4308
4309 /* A complete key is combined with meta data key and tuple key.
4310  * Meta data key is stored at the MSB region, and tuple key is stored at
4311  * the LSB region, and unused bits are filled with 0.
4312  */
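/* A rough sketch of the layout (sizes illustrative only; the actual split
 * depends on the firmware-reported max_key_length and MAX_META_DATA_LENGTH):
 *
 *   LSB                                                      MSB
 *   +--------------------+------------+-----------------------+
 *   |     tuple key      |  zero pad  |    meta data key      |
 *   +--------------------+------------+-----------------------+
 */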
4313 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4314                             struct hclge_fd_rule *rule)
4315 {
4316         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4317         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4318         u8 *cur_key_x, *cur_key_y;
4319         int i, ret, tuple_size;
4320         u8 meta_data_region;
4321
4322         memset(key_x, 0, sizeof(key_x));
4323         memset(key_y, 0, sizeof(key_y));
4324         cur_key_x = key_x;
4325         cur_key_y = key_y;
4326
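        /* Walk every possible tuple: a tuple that is active in hardware but
         * marked unused by the rule still occupies (zeroed) space in the
         * key, while a tuple not enabled in tuple_active takes no space at
         * all (hclge_fd_convert_tuple() returns false for it).
         */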
4327         for (i = 0; i < MAX_TUPLE; i++) {
4328                 bool tuple_valid;
4329                 u32 check_tuple;
4330
4331                 tuple_size = tuple_key_info[i].key_length / 8;
4332                 check_tuple = key_cfg->tuple_active & BIT(i);
4333
4334                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4335                                                      cur_key_y, rule);
4336                 if (tuple_valid) {
4337                         cur_key_x += tuple_size;
4338                         cur_key_y += tuple_size;
4339                 }
4340         }
4341
4342         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4343                         MAX_META_DATA_LENGTH / 8;
4344
4345         hclge_fd_convert_meta_data(key_cfg,
4346                                    (__le32 *)(key_x + meta_data_region),
4347                                    (__le32 *)(key_y + meta_data_region),
4348                                    rule);
4349
4350         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4351                                    true);
4352         if (ret) {
4353                 dev_err(&hdev->pdev->dev,
4354                         "fd key_y config fail, loc=%d, ret=%d\n",
4355                         rule->location, ret);
4356                 return ret;
4357         }
4358
4359         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4360                                    true);
4361         if (ret)
4362                 dev_err(&hdev->pdev->dev,
4363                         "fd key_x config fail, loc=%d, ret=%d\n",
4364                         rule->location, ret);
4365         return ret;
4366 }
4367
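/* Illustrative mapping (derived from hclge_add_fd_entry() below): an ethtool
 * rule whose ring_cookie is RX_CLS_FLOW_DISC is programmed as a drop action,
 * anything else is forwarded directly to rule->queue_id.
 */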
4368 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4369                                struct hclge_fd_rule *rule)
4370 {
4371         struct hclge_fd_ad_data ad_data;
4372
4373         ad_data.ad_id = rule->location;
4374
4375         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4376                 ad_data.drop_packet = true;
4377                 ad_data.forward_to_direct_queue = false;
4378                 ad_data.queue_id = 0;
4379         } else {
4380                 ad_data.drop_packet = false;
4381                 ad_data.forward_to_direct_queue = true;
4382                 ad_data.queue_id = rule->queue_id;
4383         }
4384
4385         ad_data.use_counter = false;
4386         ad_data.counter_id = 0;
4387
4388         ad_data.use_next_stage = false;
4389         ad_data.next_input_key = 0;
4390
4391         ad_data.write_rule_id_to_bd = true;
4392         ad_data.rule_id = rule->location;
4393
4394         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4395 }
4396
4397 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4398                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4399 {
4400         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4401         struct ethtool_usrip4_spec *usr_ip4_spec;
4402         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4403         struct ethtool_usrip6_spec *usr_ip6_spec;
4404         struct ethhdr *ether_spec;
4405
4406         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4407                 return -EINVAL;
4408
4409         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4410                 return -EOPNOTSUPP;
4411
4412         if ((fs->flow_type & FLOW_EXT) &&
4413             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4414                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4415                 return -EOPNOTSUPP;
4416         }
4417
4418         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4419         case SCTP_V4_FLOW:
4420         case TCP_V4_FLOW:
4421         case UDP_V4_FLOW:
4422                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4423                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4424
4425                 if (!tcp_ip4_spec->ip4src)
4426                         *unused |= BIT(INNER_SRC_IP);
4427
4428                 if (!tcp_ip4_spec->ip4dst)
4429                         *unused |= BIT(INNER_DST_IP);
4430
4431                 if (!tcp_ip4_spec->psrc)
4432                         *unused |= BIT(INNER_SRC_PORT);
4433
4434                 if (!tcp_ip4_spec->pdst)
4435                         *unused |= BIT(INNER_DST_PORT);
4436
4437                 if (!tcp_ip4_spec->tos)
4438                         *unused |= BIT(INNER_IP_TOS);
4439
4440                 break;
4441         case IP_USER_FLOW:
4442                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4443                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4444                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4445
4446                 if (!usr_ip4_spec->ip4src)
4447                         *unused |= BIT(INNER_SRC_IP);
4448
4449                 if (!usr_ip4_spec->ip4dst)
4450                         *unused |= BIT(INNER_DST_IP);
4451
4452                 if (!usr_ip4_spec->tos)
4453                         *unused |= BIT(INNER_IP_TOS);
4454
4455                 if (!usr_ip4_spec->proto)
4456                         *unused |= BIT(INNER_IP_PROTO);
4457
4458                 if (usr_ip4_spec->l4_4_bytes)
4459                         return -EOPNOTSUPP;
4460
4461                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4462                         return -EOPNOTSUPP;
4463
4464                 break;
4465         case SCTP_V6_FLOW:
4466         case TCP_V6_FLOW:
4467         case UDP_V6_FLOW:
4468                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4469                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4470                         BIT(INNER_IP_TOS);
4471
4472                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4473                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4474                         *unused |= BIT(INNER_SRC_IP);
4475
4476                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4477                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4478                         *unused |= BIT(INNER_DST_IP);
4479
4480                 if (!tcp_ip6_spec->psrc)
4481                         *unused |= BIT(INNER_SRC_PORT);
4482
4483                 if (!tcp_ip6_spec->pdst)
4484                         *unused |= BIT(INNER_DST_PORT);
4485
4486                 if (tcp_ip6_spec->tclass)
4487                         return -EOPNOTSUPP;
4488
4489                 break;
4490         case IPV6_USER_FLOW:
4491                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4492                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4493                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4494                         BIT(INNER_DST_PORT);
4495
4496                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4497                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4498                         *unused |= BIT(INNER_SRC_IP);
4499
4500                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4501                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4502                         *unused |= BIT(INNER_DST_IP);
4503
4504                 if (!usr_ip6_spec->l4_proto)
4505                         *unused |= BIT(INNER_IP_PROTO);
4506
4507                 if (usr_ip6_spec->tclass)
4508                         return -EOPNOTSUPP;
4509
4510                 if (usr_ip6_spec->l4_4_bytes)
4511                         return -EOPNOTSUPP;
4512
4513                 break;
4514         case ETHER_FLOW:
4515                 ether_spec = &fs->h_u.ether_spec;
4516                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4517                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4518                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4519
4520                 if (is_zero_ether_addr(ether_spec->h_source))
4521                         *unused |= BIT(INNER_SRC_MAC);
4522
4523                 if (is_zero_ether_addr(ether_spec->h_dest))
4524                         *unused |= BIT(INNER_DST_MAC);
4525
4526                 if (!ether_spec->h_proto)
4527                         *unused |= BIT(INNER_ETH_TYPE);
4528
4529                 break;
4530         default:
4531                 return -EOPNOTSUPP;
4532         }
4533
4534         if ((fs->flow_type & FLOW_EXT)) {
4535                 if (fs->h_ext.vlan_etype)
4536                         return -EOPNOTSUPP;
4537                 if (!fs->h_ext.vlan_tci)
4538                         *unused |= BIT(INNER_VLAN_TAG_FST);
4539
4540                 if (fs->m_ext.vlan_tci) {
4541                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4542                                 return -EINVAL;
4543                 }
4544         } else {
4545                 *unused |= BIT(INNER_VLAN_TAG_FST);
4546         }
4547
4548         if (fs->flow_type & FLOW_MAC_EXT) {
4549                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4550                         return -EOPNOTSUPP;
4551
4552                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4553                         *unused |= BIT(INNER_DST_MAC);
4554                 else
4555                         *unused &= ~(BIT(INNER_DST_MAC));
4556         }
4557
4558         return 0;
4559 }
4560
4561 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4562 {
4563         struct hclge_fd_rule *rule = NULL;
4564         struct hlist_node *node2;
4565
4566         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4567                 if (rule->location >= location)
4568                         break;
4569         }
4570
4571         return rule && rule->location == location;
4572 }
4573
4574 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4575                                      struct hclge_fd_rule *new_rule,
4576                                      u16 location,
4577                                      bool is_add)
4578 {
4579         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4580         struct hlist_node *node2;
4581
4582         if (is_add && !new_rule)
4583                 return -EINVAL;
4584
4585         hlist_for_each_entry_safe(rule, node2,
4586                                   &hdev->fd_rule_list, rule_node) {
4587                 if (rule->location >= location)
4588                         break;
4589                 parent = rule;
4590         }
4591
4592         if (rule && rule->location == location) {
4593                 hlist_del(&rule->rule_node);
4594                 kfree(rule);
4595                 hdev->hclge_fd_rule_num--;
4596
4597                 if (!is_add)
4598                         return 0;
4599
4600         } else if (!is_add) {
4601                 dev_err(&hdev->pdev->dev,
4602                         "delete fail, rule %d is inexistent\n",
4603                         location);
4604                 return -EINVAL;
4605         }
4606
4607         INIT_HLIST_NODE(&new_rule->rule_node);
4608
4609         if (parent)
4610                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4611         else
4612                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4613
4614         hdev->hclge_fd_rule_num++;
4615
4616         return 0;
4617 }
4618
4619 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4620                               struct ethtool_rx_flow_spec *fs,
4621                               struct hclge_fd_rule *rule)
4622 {
4623         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4624
4625         switch (flow_type) {
4626         case SCTP_V4_FLOW:
4627         case TCP_V4_FLOW:
4628         case UDP_V4_FLOW:
4629                 rule->tuples.src_ip[3] =
4630                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4631                 rule->tuples_mask.src_ip[3] =
4632                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4633
4634                 rule->tuples.dst_ip[3] =
4635                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4636                 rule->tuples_mask.dst_ip[3] =
4637                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4638
4639                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4640                 rule->tuples_mask.src_port =
4641                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4642
4643                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4644                 rule->tuples_mask.dst_port =
4645                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4646
4647                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4648                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4649
4650                 rule->tuples.ether_proto = ETH_P_IP;
4651                 rule->tuples_mask.ether_proto = 0xFFFF;
4652
4653                 break;
4654         case IP_USER_FLOW:
4655                 rule->tuples.src_ip[3] =
4656                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4657                 rule->tuples_mask.src_ip[3] =
4658                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4659
4660                 rule->tuples.dst_ip[3] =
4661                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4662                 rule->tuples_mask.dst_ip[3] =
4663                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4664
4665                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4666                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4667
4668                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4669                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4670
4671                 rule->tuples.ether_proto = ETH_P_IP;
4672                 rule->tuples_mask.ether_proto = 0xFFFF;
4673
4674                 break;
4675         case SCTP_V6_FLOW:
4676         case TCP_V6_FLOW:
4677         case UDP_V6_FLOW:
4678                 be32_to_cpu_array(rule->tuples.src_ip,
4679                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4680                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4681                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4682
4683                 be32_to_cpu_array(rule->tuples.dst_ip,
4684                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4685                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4686                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4687
4688                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4689                 rule->tuples_mask.src_port =
4690                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4691
4692                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4693                 rule->tuples_mask.dst_port =
4694                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4695
4696                 rule->tuples.ether_proto = ETH_P_IPV6;
4697                 rule->tuples_mask.ether_proto = 0xFFFF;
4698
4699                 break;
4700         case IPV6_USER_FLOW:
4701                 be32_to_cpu_array(rule->tuples.src_ip,
4702                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4703                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4704                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4705
4706                 be32_to_cpu_array(rule->tuples.dst_ip,
4707                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4708                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4709                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4710
4711                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4712                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4713
4714                 rule->tuples.ether_proto = ETH_P_IPV6;
4715                 rule->tuples_mask.ether_proto = 0xFFFF;
4716
4717                 break;
4718         case ETHER_FLOW:
4719                 ether_addr_copy(rule->tuples.src_mac,
4720                                 fs->h_u.ether_spec.h_source);
4721                 ether_addr_copy(rule->tuples_mask.src_mac,
4722                                 fs->m_u.ether_spec.h_source);
4723
4724                 ether_addr_copy(rule->tuples.dst_mac,
4725                                 fs->h_u.ether_spec.h_dest);
4726                 ether_addr_copy(rule->tuples_mask.dst_mac,
4727                                 fs->m_u.ether_spec.h_dest);
4728
4729                 rule->tuples.ether_proto =
4730                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4731                 rule->tuples_mask.ether_proto =
4732                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4733
4734                 break;
4735         default:
4736                 return -EOPNOTSUPP;
4737         }
4738
4739         switch (flow_type) {
4740         case SCTP_V4_FLOW:
4741         case SCTP_V6_FLOW:
4742                 rule->tuples.ip_proto = IPPROTO_SCTP;
4743                 rule->tuples_mask.ip_proto = 0xFF;
4744                 break;
4745         case TCP_V4_FLOW:
4746         case TCP_V6_FLOW:
4747                 rule->tuples.ip_proto = IPPROTO_TCP;
4748                 rule->tuples_mask.ip_proto = 0xFF;
4749                 break;
4750         case UDP_V4_FLOW:
4751         case UDP_V6_FLOW:
4752                 rule->tuples.ip_proto = IPPROTO_UDP;
4753                 rule->tuples_mask.ip_proto = 0xFF;
4754                 break;
4755         default:
4756                 break;
4757         }
4758
4759         if ((fs->flow_type & FLOW_EXT)) {
4760                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4761                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4762         }
4763
4764         if (fs->flow_type & FLOW_MAC_EXT) {
4765                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4766                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4767         }
4768
4769         return 0;
4770 }
4771
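/* A minimal usage sketch (not part of the original source): this entry point
 * is reached through ethtool's ntuple interface, e.g.
 *
 *	ethtool -U eth0 flow-type tcp4 src-ip 192.168.0.2 dst-port 80 \
 *		action 3 loc 5
 *
 * The interface name, address, queue index and location above are
 * hypothetical.
 */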
4772 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4773                               struct ethtool_rxnfc *cmd)
4774 {
4775         struct hclge_vport *vport = hclge_get_vport(handle);
4776         struct hclge_dev *hdev = vport->back;
4777         u16 dst_vport_id = 0, q_index = 0;
4778         struct ethtool_rx_flow_spec *fs;
4779         struct hclge_fd_rule *rule;
4780         u32 unused = 0;
4781         u8 action;
4782         int ret;
4783
4784         if (!hnae3_dev_fd_supported(hdev))
4785                 return -EOPNOTSUPP;
4786
4787         if (!hdev->fd_en) {
4788                 dev_warn(&hdev->pdev->dev,
4789                          "Please enable flow director first\n");
4790                 return -EOPNOTSUPP;
4791         }
4792
4793         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4794
4795         ret = hclge_fd_check_spec(hdev, fs, &unused);
4796         if (ret) {
4797                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4798                 return ret;
4799         }
4800
4801         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4802                 action = HCLGE_FD_ACTION_DROP_PACKET;
4803         } else {
4804                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4805                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4806                 u16 tqps;
4807
4808                 if (vf > hdev->num_req_vfs) {
4809                         dev_err(&hdev->pdev->dev,
4810                                 "Error: vf id (%d) > max vf num (%d)\n",
4811                                 vf, hdev->num_req_vfs);
4812                         return -EINVAL;
4813                 }
4814
4815                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4816                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4817
4818                 if (ring >= tqps) {
4819                         dev_err(&hdev->pdev->dev,
4820                                 "Error: queue id (%d) > max tqp num (%d)\n",
4821                                 ring, tqps - 1);
4822                         return -EINVAL;
4823                 }
4824
4825                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4826                 q_index = ring;
4827         }
4828
4829         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4830         if (!rule)
4831                 return -ENOMEM;
4832
4833         ret = hclge_fd_get_tuple(hdev, fs, rule);
4834         if (ret)
4835                 goto free_rule;
4836
4837         rule->flow_type = fs->flow_type;
4838
4839         rule->location = fs->location;
4840         rule->unused_tuple = unused;
4841         rule->vf_id = dst_vport_id;
4842         rule->queue_id = q_index;
4843         rule->action = action;
4844
4845         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4846         if (ret)
4847                 goto free_rule;
4848
4849         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4850         if (ret)
4851                 goto free_rule;
4852
4853         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4854         if (ret)
4855                 goto free_rule;
4856
4857         return ret;
4858
4859 free_rule:
4860         kfree(rule);
4861         return ret;
4862 }
4863
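/* Deletion sketch (hypothetical device name): a rule previously added at
 * location 5 can be removed with "ethtool -U eth0 delete 5", which ends up
 * here.
 */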
4864 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4865                               struct ethtool_rxnfc *cmd)
4866 {
4867         struct hclge_vport *vport = hclge_get_vport(handle);
4868         struct hclge_dev *hdev = vport->back;
4869         struct ethtool_rx_flow_spec *fs;
4870         int ret;
4871
4872         if (!hnae3_dev_fd_supported(hdev))
4873                 return -EOPNOTSUPP;
4874
4875         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4876
4877         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4878                 return -EINVAL;
4879
4880         if (!hclge_fd_rule_exist(hdev, fs->location)) {
4881                 dev_err(&hdev->pdev->dev,
4882                         "Delete fail, rule %d is inexistent\n",
4883                         fs->location);
4884                 return -ENOENT;
4885         }
4886
4887         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4888                                    fs->location, NULL, false);
4889         if (ret)
4890                 return ret;
4891
4892         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4893                                          false);
4894 }
4895
4896 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4897                                      bool clear_list)
4898 {
4899         struct hclge_vport *vport = hclge_get_vport(handle);
4900         struct hclge_dev *hdev = vport->back;
4901         struct hclge_fd_rule *rule;
4902         struct hlist_node *node;
4903
4904         if (!hnae3_dev_fd_supported(hdev))
4905                 return;
4906
4907         if (clear_list) {
4908                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4909                                           rule_node) {
4910                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4911                                              rule->location, NULL, false);
4912                         hlist_del(&rule->rule_node);
4913                         kfree(rule);
4914                         hdev->hclge_fd_rule_num--;
4915                 }
4916         } else {
4917                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4918                                           rule_node)
4919                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4920                                              rule->location, NULL, false);
4921         }
4922 }
4923
4924 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4925 {
4926         struct hclge_vport *vport = hclge_get_vport(handle);
4927         struct hclge_dev *hdev = vport->back;
4928         struct hclge_fd_rule *rule;
4929         struct hlist_node *node;
4930         int ret;
4931
4932         /* Return ok here, because reset error handling will check this
4933          * return value. If error is returned here, the reset process will
4934          * fail.
4935          */
4936         if (!hnae3_dev_fd_supported(hdev))
4937                 return 0;
4938
4939         /* if fd is disabled, the rules should not be restored during reset */
4940         if (!hdev->fd_en)
4941                 return 0;
4942
4943         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4944                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4945                 if (!ret)
4946                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4947
4948                 if (ret) {
4949                         dev_warn(&hdev->pdev->dev,
4950                                  "Restore rule %d failed, remove it\n",
4951                                  rule->location);
4952                         hlist_del(&rule->rule_node);
4953                         kfree(rule);
4954                         hdev->hclge_fd_rule_num--;
4955                 }
4956         }
4957         return 0;
4958 }
4959
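/* Query sketch (hypothetical device name): "ethtool -n eth0" lists the
 * configured rule locations through the callbacks below and
 * hclge_get_all_rules(), and "ethtool -n eth0 rule 5" reads a single rule
 * back through hclge_get_fd_rule_info().
 */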
4960 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4961                                  struct ethtool_rxnfc *cmd)
4962 {
4963         struct hclge_vport *vport = hclge_get_vport(handle);
4964         struct hclge_dev *hdev = vport->back;
4965
4966         if (!hnae3_dev_fd_supported(hdev))
4967                 return -EOPNOTSUPP;
4968
4969         cmd->rule_cnt = hdev->hclge_fd_rule_num;
4970         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4971
4972         return 0;
4973 }
4974
4975 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4976                                   struct ethtool_rxnfc *cmd)
4977 {
4978         struct hclge_vport *vport = hclge_get_vport(handle);
4979         struct hclge_fd_rule *rule = NULL;
4980         struct hclge_dev *hdev = vport->back;
4981         struct ethtool_rx_flow_spec *fs;
4982         struct hlist_node *node2;
4983
4984         if (!hnae3_dev_fd_supported(hdev))
4985                 return -EOPNOTSUPP;
4986
4987         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4988
4989         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4990                 if (rule->location >= fs->location)
4991                         break;
4992         }
4993
4994         if (!rule || fs->location != rule->location)
4995                 return -ENOENT;
4996
4997         fs->flow_type = rule->flow_type;
4998         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4999         case SCTP_V4_FLOW:
5000         case TCP_V4_FLOW:
5001         case UDP_V4_FLOW:
5002                 fs->h_u.tcp_ip4_spec.ip4src =
5003                                 cpu_to_be32(rule->tuples.src_ip[3]);
5004                 fs->m_u.tcp_ip4_spec.ip4src =
5005                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5006                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5007
5008                 fs->h_u.tcp_ip4_spec.ip4dst =
5009                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5010                 fs->m_u.tcp_ip4_spec.ip4dst =
5011                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5012                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5013
5014                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5015                 fs->m_u.tcp_ip4_spec.psrc =
5016                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5017                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5018
5019                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5020                 fs->m_u.tcp_ip4_spec.pdst =
5021                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5022                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5023
5024                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5025                 fs->m_u.tcp_ip4_spec.tos =
5026                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5027                                 0 : rule->tuples_mask.ip_tos;
5028
5029                 break;
5030         case IP_USER_FLOW:
5031                 fs->h_u.usr_ip4_spec.ip4src =
5032                                 cpu_to_be32(rule->tuples.src_ip[3]);
5033                 fs->m_u.usr_ip4_spec.ip4src =
5034                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5035                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5036
5037                 fs->h_u.usr_ip4_spec.ip4dst =
5038                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5039                 fs->m_u.usr_ip4_spec.ip4dst =
5040                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5041                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5042
5043                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5044                 fs->m_u.usr_ip4_spec.tos =
5045                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5046                                 0 : rule->tuples_mask.ip_tos;
5047
5048                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5049                 fs->m_u.usr_ip4_spec.proto =
5050                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5051                                 0 : rule->tuples_mask.ip_proto;
5052
5053                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5054
5055                 break;
5056         case SCTP_V6_FLOW:
5057         case TCP_V6_FLOW:
5058         case UDP_V6_FLOW:
5059                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5060                                   rule->tuples.src_ip, 4);
5061                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5062                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5063                 else
5064                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5065                                           rule->tuples_mask.src_ip, 4);
5066
5067                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5068                                   rule->tuples.dst_ip, 4);
5069                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5070                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5071                 else
5072                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5073                                           rule->tuples_mask.dst_ip, 4);
5074
5075                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5076                 fs->m_u.tcp_ip6_spec.psrc =
5077                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5078                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5079
5080                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5081                 fs->m_u.tcp_ip6_spec.pdst =
5082                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5083                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5084
5085                 break;
5086         case IPV6_USER_FLOW:
5087                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5088                                   rule->tuples.src_ip, 4);
5089                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5090                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5091                 else
5092                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5093                                           rule->tuples_mask.src_ip, 4);
5094
5095                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5096                                   rule->tuples.dst_ip, 4);
5097                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5098                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5099                 else
5100                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5101                                           rule->tuples_mask.dst_ip, 4);
5102
5103                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5104                 fs->m_u.usr_ip6_spec.l4_proto =
5105                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5106                                 0 : rule->tuples_mask.ip_proto;
5107
5108                 break;
5109         case ETHER_FLOW:
5110                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5111                                 rule->tuples.src_mac);
5112                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5113                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5114                 else
5115                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5116                                         rule->tuples_mask.src_mac);
5117
5118                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5119                                 rule->tuples.dst_mac);
5120                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5121                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5122                 else
5123                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5124                                         rule->tuples_mask.dst_mac);
5125
5126                 fs->h_u.ether_spec.h_proto =
5127                                 cpu_to_be16(rule->tuples.ether_proto);
5128                 fs->m_u.ether_spec.h_proto =
5129                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5130                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5131
5132                 break;
5133         default:
5134                 return -EOPNOTSUPP;
5135         }
5136
5137         if (fs->flow_type & FLOW_EXT) {
5138                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5139                 fs->m_ext.vlan_tci =
5140                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5141                                 cpu_to_be16(VLAN_VID_MASK) :
5142                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5143         }
5144
5145         if (fs->flow_type & FLOW_MAC_EXT) {
5146                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5147                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5148                         eth_zero_addr(fs->m_ext.h_dest);
5149                 else
5150                         ether_addr_copy(fs->m_ext.h_dest,
5151                                         rule->tuples_mask.dst_mac);
5152         }
5153
5154         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5155                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5156         } else {
5157                 u64 vf_id;
5158
5159                 fs->ring_cookie = rule->queue_id;
5160                 vf_id = rule->vf_id;
5161                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5162                 fs->ring_cookie |= vf_id;
5163         }
5164
5165         return 0;
5166 }
5167
5168 static int hclge_get_all_rules(struct hnae3_handle *handle,
5169                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5170 {
5171         struct hclge_vport *vport = hclge_get_vport(handle);
5172         struct hclge_dev *hdev = vport->back;
5173         struct hclge_fd_rule *rule;
5174         struct hlist_node *node2;
5175         int cnt = 0;
5176
5177         if (!hnae3_dev_fd_supported(hdev))
5178                 return -EOPNOTSUPP;
5179
5180         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5181
5182         hlist_for_each_entry_safe(rule, node2,
5183                                   &hdev->fd_rule_list, rule_node) {
5184                 if (cnt == cmd->rule_cnt)
5185                         return -EMSGSIZE;
5186
5187                 rule_locs[cnt] = rule->location;
5188                 cnt++;
5189         }
5190
5191         cmd->rule_cnt = cnt;
5192
5193         return 0;
5194 }
5195
5196 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5197 {
5198         struct hclge_vport *vport = hclge_get_vport(handle);
5199         struct hclge_dev *hdev = vport->back;
5200
5201         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5202                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5203 }
5204
5205 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5206 {
5207         struct hclge_vport *vport = hclge_get_vport(handle);
5208         struct hclge_dev *hdev = vport->back;
5209
5210         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5211 }
5212
5213 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5214 {
5215         struct hclge_vport *vport = hclge_get_vport(handle);
5216         struct hclge_dev *hdev = vport->back;
5217
5218         return hdev->reset_count;
5219 }
5220
5221 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5222 {
5223         struct hclge_vport *vport = hclge_get_vport(handle);
5224         struct hclge_dev *hdev = vport->back;
5225
5226         hdev->fd_en = enable;
5227         if (!enable)
5228                 hclge_del_all_fd_entries(handle, false);
5229         else
5230                 hclge_restore_fd_entries(handle);
5231 }
5232
5233 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5234 {
5235         struct hclge_desc desc;
5236         struct hclge_config_mac_mode_cmd *req =
5237                 (struct hclge_config_mac_mode_cmd *)desc.data;
5238         u32 loop_en = 0;
5239         int ret;
5240
5241         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5242         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5243         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5244         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5245         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5246         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5247         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5248         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5249         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5250         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5251         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5252         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5253         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5254         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5255         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5256         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5257
5258         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5259         if (ret)
5260                 dev_err(&hdev->pdev->dev,
5261                         "mac enable fail, ret =%d.\n", ret);
5262 }
5263
5264 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5265 {
5266         struct hclge_config_mac_mode_cmd *req;
5267         struct hclge_desc desc;
5268         u32 loop_en;
5269         int ret;
5270
5271         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5272         /* 1 Read out the MAC mode config at first */
5273         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5274         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5275         if (ret) {
5276                 dev_err(&hdev->pdev->dev,
5277                         "mac loopback get fail, ret =%d.\n", ret);
5278                 return ret;
5279         }
5280
5281         /* 2 Then setup the loopback flag */
5282         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5283         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5284         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5285         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5286
5287         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5288
5289         /* 3 Config mac work mode with loopback flag
5290          * and its original configure parameters
5291          */
5292         hclge_cmd_reuse_desc(&desc, false);
5293         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5294         if (ret)
5295                 dev_err(&hdev->pdev->dev,
5296                         "mac loopback set fail, ret =%d.\n", ret);
5297         return ret;
5298 }
5299
5300 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5301                                      enum hnae3_loop loop_mode)
5302 {
5303 #define HCLGE_SERDES_RETRY_MS   10
5304 #define HCLGE_SERDES_RETRY_NUM  100
5305
5306 #define HCLGE_MAC_LINK_STATUS_MS   20
5307 #define HCLGE_MAC_LINK_STATUS_NUM  10
5308 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5309 #define HCLGE_MAC_LINK_STATUS_UP   1
5310
5311         struct hclge_serdes_lb_cmd *req;
5312         struct hclge_desc desc;
5313         int mac_link_ret = 0;
5314         int ret, i = 0;
5315         u8 loop_mode_b;
5316
5317         req = (struct hclge_serdes_lb_cmd *)desc.data;
5318         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5319
5320         switch (loop_mode) {
5321         case HNAE3_LOOP_SERIAL_SERDES:
5322                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5323                 break;
5324         case HNAE3_LOOP_PARALLEL_SERDES:
5325                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5326                 break;
5327         default:
5328                 dev_err(&hdev->pdev->dev,
5329                         "unsupported serdes loopback mode %d\n", loop_mode);
5330                 return -EOPNOTSUPP;
5331         }
5332
5333         if (en) {
5334                 req->enable = loop_mode_b;
5335                 req->mask = loop_mode_b;
5336                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5337         } else {
5338                 req->mask = loop_mode_b;
5339                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5340         }
5341
5342         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5343         if (ret) {
5344                 dev_err(&hdev->pdev->dev,
5345                         "serdes loopback set fail, ret = %d\n", ret);
5346                 return ret;
5347         }
5348
5349         do {
5350                 msleep(HCLGE_SERDES_RETRY_MS);
5351                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5352                                            true);
5353                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5354                 if (ret) {
5355                         dev_err(&hdev->pdev->dev,
5356                                 "serdes loopback get, ret = %d\n", ret);
5357                         return ret;
5358                 }
5359         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5360                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5361
5362         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5363                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5364                 return -EBUSY;
5365         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5366                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5367                 return -EIO;
5368         }
5369
5370         hclge_cfg_mac_mode(hdev, en);
5371
5372         i = 0;
5373         do {
5374                 /* Serdes internal loopback, independent of the network cable. */
5375                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5376                 ret = hclge_get_mac_link_status(hdev);
5377                 if (ret == mac_link_ret)
5378                         return 0;
5379         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5380
5381         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5382
5383         return -EBUSY;
5384 }
5385
5386 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5387                             int stream_id, bool enable)
5388 {
5389         struct hclge_desc desc;
5390         struct hclge_cfg_com_tqp_queue_cmd *req =
5391                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5392         int ret;
5393
5394         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5395         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5396         req->stream_id = cpu_to_le16(stream_id);
5397         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5398
5399         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5400         if (ret)
5401                 dev_err(&hdev->pdev->dev,
5402                         "Tqp enable fail, status =%d.\n", ret);
5403         return ret;
5404 }
5405
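/* These loopback modes are typically exercised by the ethtool self-test path
 * (e.g. "ethtool -t eth0", device name hypothetical), which asks the driver
 * to enable a loopback, pass test frames through it and disable it again.
 */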
5406 static int hclge_set_loopback(struct hnae3_handle *handle,
5407                               enum hnae3_loop loop_mode, bool en)
5408 {
5409         struct hclge_vport *vport = hclge_get_vport(handle);
5410         struct hnae3_knic_private_info *kinfo;
5411         struct hclge_dev *hdev = vport->back;
5412         int i, ret;
5413
5414         switch (loop_mode) {
5415         case HNAE3_LOOP_APP:
5416                 ret = hclge_set_app_loopback(hdev, en);
5417                 break;
5418         case HNAE3_LOOP_SERIAL_SERDES:
5419         case HNAE3_LOOP_PARALLEL_SERDES:
5420                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5421                 break;
5422         default:
5423                 ret = -EOPNOTSUPP;
5424                 dev_err(&hdev->pdev->dev,
5425                         "loop_mode %d is not supported\n", loop_mode);
5426                 break;
5427         }
5428
5429         if (ret)
5430                 return ret;
5431
5432         kinfo = &vport->nic.kinfo;
5433         for (i = 0; i < kinfo->num_tqps; i++) {
5434                 ret = hclge_tqp_enable(hdev, i, 0, en);
5435                 if (ret)
5436                         return ret;
5437         }
5438
5439         return 0;
5440 }
5441
5442 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5443 {
5444         struct hclge_vport *vport = hclge_get_vport(handle);
5445         struct hnae3_knic_private_info *kinfo;
5446         struct hnae3_queue *queue;
5447         struct hclge_tqp *tqp;
5448         int i;
5449
5450         kinfo = &vport->nic.kinfo;
5451         for (i = 0; i < kinfo->num_tqps; i++) {
5452                 queue = handle->kinfo.tqp[i];
5453                 tqp = container_of(queue, struct hclge_tqp, q);
5454                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5455         }
5456 }
5457
5458 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5459 {
5460         struct hclge_vport *vport = hclge_get_vport(handle);
5461         struct hclge_dev *hdev = vport->back;
5462
5463         if (enable) {
5464                 mod_timer(&hdev->service_timer, jiffies + HZ);
5465         } else {
5466                 del_timer_sync(&hdev->service_timer);
5467                 cancel_work_sync(&hdev->service_task);
5468                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5469         }
5470 }
5471
5472 static int hclge_ae_start(struct hnae3_handle *handle)
5473 {
5474         struct hclge_vport *vport = hclge_get_vport(handle);
5475         struct hclge_dev *hdev = vport->back;
5476
5477         /* mac enable */
5478         hclge_cfg_mac_mode(hdev, true);
5479         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5480         hdev->hw.mac.link = 0;
5481
5482         /* reset tqp stats */
5483         hclge_reset_tqp_stats(handle);
5484
5485         hclge_mac_start_phy(hdev);
5486
5487         return 0;
5488 }
5489
5490 static void hclge_ae_stop(struct hnae3_handle *handle)
5491 {
5492         struct hclge_vport *vport = hclge_get_vport(handle);
5493         struct hclge_dev *hdev = vport->back;
5494         int i;
5495
5496         set_bit(HCLGE_STATE_DOWN, &hdev->state);
5497
5498         /* If it is not PF reset, the firmware will disable the MAC,
5499          * so it only needs to stop the PHY here.
5500          */
5501         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5502             hdev->reset_type != HNAE3_FUNC_RESET) {
5503                 hclge_mac_stop_phy(hdev);
5504                 return;
5505         }
5506
5507         for (i = 0; i < handle->kinfo.num_tqps; i++)
5508                 hclge_reset_tqp(handle, i);
5509
5510         /* Mac disable */
5511         hclge_cfg_mac_mode(hdev, false);
5512
5513         hclge_mac_stop_phy(hdev);
5514
5515         /* reset tqp stats */
5516         hclge_reset_tqp_stats(handle);
5517         hclge_update_link_status(hdev);
5518 }
5519
5520 int hclge_vport_start(struct hclge_vport *vport)
5521 {
5522         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5523         vport->last_active_jiffies = jiffies;
5524         return 0;
5525 }
5526
5527 void hclge_vport_stop(struct hclge_vport *vport)
5528 {
5529         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5530 }
5531
5532 static int hclge_client_start(struct hnae3_handle *handle)
5533 {
5534         struct hclge_vport *vport = hclge_get_vport(handle);
5535
5536         return hclge_vport_start(vport);
5537 }
5538
5539 static void hclge_client_stop(struct hnae3_handle *handle)
5540 {
5541         struct hclge_vport *vport = hclge_get_vport(handle);
5542
5543         hclge_vport_stop(vport);
5544 }
5545
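/* Summary of the firmware response codes handled below (derived from the code
 * itself): for ADD, 0 and 1 mean success, 2 means unicast table overflow and
 * 3 means multicast table overflow (both mapped to -ENOSPC); for REMOVE and
 * LOOKUP, 0 means success and 1 means the entry was not found (-ENOENT).
 */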
5546 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5547                                          u16 cmdq_resp, u8  resp_code,
5548                                          enum hclge_mac_vlan_tbl_opcode op)
5549 {
5550         struct hclge_dev *hdev = vport->back;
5551         int return_status = -EIO;
5552
5553         if (cmdq_resp) {
5554                 dev_err(&hdev->pdev->dev,
5555                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5556                         cmdq_resp);
5557                 return -EIO;
5558         }
5559
5560         if (op == HCLGE_MAC_VLAN_ADD) {
5561                 if ((!resp_code) || (resp_code == 1)) {
5562                         return_status = 0;
5563                 } else if (resp_code == 2) {
5564                         return_status = -ENOSPC;
5565                         dev_err(&hdev->pdev->dev,
5566                                 "add mac addr failed for uc_overflow.\n");
5567                 } else if (resp_code == 3) {
5568                         return_status = -ENOSPC;
5569                         dev_err(&hdev->pdev->dev,
5570                                 "add mac addr failed for mc_overflow.\n");
5571                 } else {
5572                         dev_err(&hdev->pdev->dev,
5573                                 "add mac addr failed for undefined, code=%d.\n",
5574                                 resp_code);
5575                 }
5576         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5577                 if (!resp_code) {
5578                         return_status = 0;
5579                 } else if (resp_code == 1) {
5580                         return_status = -ENOENT;
5581                         dev_dbg(&hdev->pdev->dev,
5582                                 "remove mac addr failed for miss.\n");
5583                 } else {
5584                         dev_err(&hdev->pdev->dev,
5585                                 "remove mac addr failed for undefined, code=%d.\n",
5586                                 resp_code);
5587                 }
5588         } else if (op == HCLGE_MAC_VLAN_LKUP) {
5589                 if (!resp_code) {
5590                         return_status = 0;
5591                 } else if (resp_code == 1) {
5592                         return_status = -ENOENT;
5593                         dev_dbg(&hdev->pdev->dev,
5594                                 "lookup mac addr failed for miss.\n");
5595                 } else {
5596                         dev_err(&hdev->pdev->dev,
5597                                 "lookup mac addr failed for undefined, code=%d.\n",
5598                                 resp_code);
5599                 }
5600         } else {
5601                 return_status = -EINVAL;
5602                 dev_err(&hdev->pdev->dev,
5603                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5604                         op);
5605         }
5606
5607         return return_status;
5608 }
5609
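/* Set or clear the bit for @vfid in the VF bitmap carried by the MAC/VLAN
 * table descriptors: vfids 0-191 live in desc[1].data, vfids 192-255 in
 * desc[2].data, one bit per function.
 */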
5610 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5611 {
5612         int word_num;
5613         int bit_num;
5614
5615         if (vfid > 255 || vfid < 0)
5616                 return -EIO;
5617
5618         if (vfid >= 0 && vfid <= 191) {
5619                 word_num = vfid / 32;
5620                 bit_num  = vfid % 32;
5621                 if (clr)
5622                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5623                 else
5624                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5625         } else {
5626                 word_num = (vfid - 192) / 32;
5627                 bit_num  = vfid % 32;
5628                 if (clr)
5629                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5630                 else
5631                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5632         }
5633
5634         return 0;
5635 }
5636
5637 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5638 {
5639 #define HCLGE_DESC_NUMBER 3
5640 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5641         int i, j;
5642
5643         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5644                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5645                         if (desc[i].data[j])
5646                                 return false;
5647
5648         return true;
5649 }
5650
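/* Pack the 6-byte MAC address into the table entry: bytes 0-3 go into the
 * 32-bit mac_addr_hi32 field (byte 0 in the lowest byte), bytes 4-5 into
 * mac_addr_lo16, and the multicast entry type bits are set when @is_mc.
 */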
5651 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5652                                    const u8 *addr, bool is_mc)
5653 {
5654         const unsigned char *mac_addr = addr;
5655         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5656                        (mac_addr[0]) | (mac_addr[1] << 8);
5657         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5658
5659         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5660         if (is_mc) {
5661                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5662                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5663         }
5664
5665         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5666         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5667 }
5668
5669 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5670                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
5671 {
5672         struct hclge_dev *hdev = vport->back;
5673         struct hclge_desc desc;
5674         u8 resp_code;
5675         u16 retval;
5676         int ret;
5677
5678         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5679
5680         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5681
5682         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5683         if (ret) {
5684                 dev_err(&hdev->pdev->dev,
5685                         "del mac addr failed for cmd_send, ret =%d.\n",
5686                         ret);
5687                 return ret;
5688         }
5689         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5690         retval = le16_to_cpu(desc.retval);
5691
5692         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5693                                              HCLGE_MAC_VLAN_REMOVE);
5694 }
5695
5696 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5697                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
5698                                      struct hclge_desc *desc,
5699                                      bool is_mc)
5700 {
5701         struct hclge_dev *hdev = vport->back;
5702         u8 resp_code;
5703         u16 retval;
5704         int ret;
5705
5706         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5707         if (is_mc) {
5708                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5709                 memcpy(desc[0].data,
5710                        req,
5711                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5712                 hclge_cmd_setup_basic_desc(&desc[1],
5713                                            HCLGE_OPC_MAC_VLAN_ADD,
5714                                            true);
5715                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5716                 hclge_cmd_setup_basic_desc(&desc[2],
5717                                            HCLGE_OPC_MAC_VLAN_ADD,
5718                                            true);
5719                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5720         } else {
5721                 memcpy(desc[0].data,
5722                        req,
5723                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5724                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5725         }
5726         if (ret) {
5727                 dev_err(&hdev->pdev->dev,
5728                         "lookup mac addr failed for cmd_send, ret =%d.\n",
5729                         ret);
5730                 return ret;
5731         }
5732         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5733         retval = le16_to_cpu(desc[0].retval);
5734
5735         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5736                                              HCLGE_MAC_VLAN_LKUP);
5737 }
5738
5739 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5740                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
5741                                   struct hclge_desc *mc_desc)
5742 {
5743         struct hclge_dev *hdev = vport->back;
5744         int cfg_status;
5745         u8 resp_code;
5746         u16 retval;
5747         int ret;
5748
5749         if (!mc_desc) {
5750                 struct hclge_desc desc;
5751
5752                 hclge_cmd_setup_basic_desc(&desc,
5753                                            HCLGE_OPC_MAC_VLAN_ADD,
5754                                            false);
5755                 memcpy(desc.data, req,
5756                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5757                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5758                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5759                 retval = le16_to_cpu(desc.retval);
5760
5761                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5762                                                            resp_code,
5763                                                            HCLGE_MAC_VLAN_ADD);
5764         } else {
5765                 hclge_cmd_reuse_desc(&mc_desc[0], false);
5766                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5767                 hclge_cmd_reuse_desc(&mc_desc[1], false);
5768                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5769                 hclge_cmd_reuse_desc(&mc_desc[2], false);
5770                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5771                 memcpy(mc_desc[0].data, req,
5772                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5773                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5774                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5775                 retval = le16_to_cpu(mc_desc[0].retval);
5776
5777                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5778                                                            resp_code,
5779                                                            HCLGE_MAC_VLAN_ADD);
5780         }
5781
5782         if (ret) {
5783                 dev_err(&hdev->pdev->dev,
5784                         "add mac addr failed for cmd_send, ret =%d.\n",
5785                         ret);
5786                 return ret;
5787         }
5788
5789         return cfg_status;
5790 }
5791
5792 static int hclge_init_umv_space(struct hclge_dev *hdev)
5793 {
5794         u16 allocated_size = 0;
5795         int ret;
5796
5797         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5798                                   true);
5799         if (ret)
5800                 return ret;
5801
5802         if (allocated_size < hdev->wanted_umv_size)
5803                 dev_warn(&hdev->pdev->dev,
5804                          "Alloc umv space failed, want %d, get %d\n",
5805                          hdev->wanted_umv_size, allocated_size);
5806
5807         mutex_init(&hdev->umv_mutex);
5808         hdev->max_umv_size = allocated_size;
5809         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5810         hdev->share_umv_size = hdev->priv_umv_size +
5811                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5812
5813         return 0;
5814 }
5815
5816 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5817 {
5818         int ret;
5819
5820         if (hdev->max_umv_size > 0) {
5821                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5822                                           false);
5823                 if (ret)
5824                         return ret;
5825                 hdev->max_umv_size = 0;
5826         }
5827         mutex_destroy(&hdev->umv_mutex);
5828
5829         return 0;
5830 }
5831
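/* Ask the firmware to allocate or free unicast MAC (UMV) table space.
 * The HCLGE_UMV_SPC_ALC_B bit is set for a free request; on allocation
 * the granted size is returned in desc.data[1].
 */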
5832 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5833                                u16 *allocated_size, bool is_alloc)
5834 {
5835         struct hclge_umv_spc_alc_cmd *req;
5836         struct hclge_desc desc;
5837         int ret;
5838
5839         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5840         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5841         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5842         req->space_size = cpu_to_le32(space_size);
5843
5844         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5845         if (ret) {
5846                 dev_err(&hdev->pdev->dev,
5847                         "%s umv space failed for cmd_send, ret =%d\n",
5848                         is_alloc ? "allocate" : "free", ret);
5849                 return ret;
5850         }
5851
5852         if (is_alloc && allocated_size)
5853                 *allocated_size = le32_to_cpu(desc.data[1]);
5854
5855         return 0;
5856 }
5857
5858 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5859 {
5860         struct hclge_vport *vport;
5861         int i;
5862
5863         for (i = 0; i < hdev->num_alloc_vport; i++) {
5864                 vport = &hdev->vport[i];
5865                 vport->used_umv_num = 0;
5866         }
5867
5868         mutex_lock(&hdev->umv_mutex);
5869         hdev->share_umv_size = hdev->priv_umv_size +
5870                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5871         mutex_unlock(&hdev->umv_mutex);
5872 }
5873
5874 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5875 {
5876         struct hclge_dev *hdev = vport->back;
5877         bool is_full;
5878
5879         mutex_lock(&hdev->umv_mutex);
5880         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5881                    hdev->share_umv_size == 0);
5882         mutex_unlock(&hdev->umv_mutex);
5883
5884         return is_full;
5885 }
5886
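/* Account for one unicast entry being added or freed: a vport consumes
 * its private UMV quota first and then the shared pool, and freeing
 * returns the entry to the shared pool only when the vport was already
 * over its private quota.
 */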
5887 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5888 {
5889         struct hclge_dev *hdev = vport->back;
5890
5891         mutex_lock(&hdev->umv_mutex);
5892         if (is_free) {
5893                 if (vport->used_umv_num > hdev->priv_umv_size)
5894                         hdev->share_umv_size++;
5895
5896                 if (vport->used_umv_num > 0)
5897                         vport->used_umv_num--;
5898         } else {
5899                 if (vport->used_umv_num >= hdev->priv_umv_size &&
5900                     hdev->share_umv_size > 0)
5901                         hdev->share_umv_size--;
5902                 vport->used_umv_num++;
5903         }
5904         mutex_unlock(&hdev->umv_mutex);
5905 }
5906
5907 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5908                              const unsigned char *addr)
5909 {
5910         struct hclge_vport *vport = hclge_get_vport(handle);
5911
5912         return hclge_add_uc_addr_common(vport, addr);
5913 }
5914
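/* Add a unicast MAC address for @vport: the address is looked up first
 * and a new entry is only programmed when it is missing and UMV space is
 * still available; finding an existing entry is treated as success.
 */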
5915 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5916                              const unsigned char *addr)
5917 {
5918         struct hclge_dev *hdev = vport->back;
5919         struct hclge_mac_vlan_tbl_entry_cmd req;
5920         struct hclge_desc desc;
5921         u16 egress_port = 0;
5922         int ret;
5923
5924         /* mac addr check */
5925         if (is_zero_ether_addr(addr) ||
5926             is_broadcast_ether_addr(addr) ||
5927             is_multicast_ether_addr(addr)) {
5928                 dev_err(&hdev->pdev->dev,
5929                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5930                          addr,
5931                          is_zero_ether_addr(addr),
5932                          is_broadcast_ether_addr(addr),
5933                          is_multicast_ether_addr(addr));
5934                 return -EINVAL;
5935         }
5936
5937         memset(&req, 0, sizeof(req));
5938
5939         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5940                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5941
5942         req.egress_port = cpu_to_le16(egress_port);
5943
5944         hclge_prepare_mac_addr(&req, addr, false);
5945
5946         /* Look up the mac address in the mac_vlan table, and add
5947          * it if the entry does not exist. Duplicate unicast entries
5948          * are not allowed in the mac_vlan table.
5949          */
5950         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5951         if (ret == -ENOENT) {
5952                 if (!hclge_is_umv_space_full(vport)) {
5953                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5954                         if (!ret)
5955                                 hclge_update_umv_space(vport, false);
5956                         return ret;
5957                 }
5958
5959                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5960                         hdev->priv_umv_size);
5961
5962                 return -ENOSPC;
5963         }
5964
5965         /* check if we just hit the duplicate */
5966         if (!ret) {
5967                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
5968                          vport->vport_id, addr);
5969                 return 0;
5970         }
5971
5972         dev_err(&hdev->pdev->dev,
5973                 "PF failed to add unicast entry(%pM) in the MAC table\n",
5974                 addr);
5975
5976         return ret;
5977 }
5978
5979 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5980                             const unsigned char *addr)
5981 {
5982         struct hclge_vport *vport = hclge_get_vport(handle);
5983
5984         return hclge_rm_uc_addr_common(vport, addr);
5985 }
5986
5987 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5988                             const unsigned char *addr)
5989 {
5990         struct hclge_dev *hdev = vport->back;
5991         struct hclge_mac_vlan_tbl_entry_cmd req;
5992         int ret;
5993
5994         /* mac addr check */
5995         if (is_zero_ether_addr(addr) ||
5996             is_broadcast_ether_addr(addr) ||
5997             is_multicast_ether_addr(addr)) {
5998                 dev_dbg(&hdev->pdev->dev,
5999                         "Remove mac err! invalid mac:%pM.\n",
6000                          addr);
6001                 return -EINVAL;
6002         }
6003
6004         memset(&req, 0, sizeof(req));
6005         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6006         hclge_prepare_mac_addr(&req, addr, false);
6007         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6008         if (!ret)
6009                 hclge_update_umv_space(vport, true);
6010
6011         return ret;
6012 }
6013
6014 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6015                              const unsigned char *addr)
6016 {
6017         struct hclge_vport *vport = hclge_get_vport(handle);
6018
6019         return hclge_add_mc_addr_common(vport, addr);
6020 }
6021
6022 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6023                              const unsigned char *addr)
6024 {
6025         struct hclge_dev *hdev = vport->back;
6026         struct hclge_mac_vlan_tbl_entry_cmd req;
6027         struct hclge_desc desc[3];
6028         int status;
6029
6030         /* mac addr check */
6031         if (!is_multicast_ether_addr(addr)) {
6032                 dev_err(&hdev->pdev->dev,
6033                         "Add mc mac err! invalid mac:%pM.\n",
6034                          addr);
6035                 return -EINVAL;
6036         }
6037         memset(&req, 0, sizeof(req));
6038         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6039         hclge_prepare_mac_addr(&req, addr, true);
6040         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6041         if (!status) {
6042                 /* This mac addr exists, update VFID for it */
6043                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6044                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6045         } else {
6046                 /* This mac addr does not exist, add a new entry for it */
6047                 memset(desc[0].data, 0, sizeof(desc[0].data));
6048                 memset(desc[1].data, 0, sizeof(desc[0].data));
6049                 memset(desc[2].data, 0, sizeof(desc[0].data));
6050                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6051                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6052         }
6053
6054         if (status == -ENOSPC)
6055                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6056
6057         return status;
6058 }
6059
6060 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6061                             const unsigned char *addr)
6062 {
6063         struct hclge_vport *vport = hclge_get_vport(handle);
6064
6065         return hclge_rm_mc_addr_common(vport, addr);
6066 }
6067
6068 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6069                             const unsigned char *addr)
6070 {
6071         struct hclge_dev *hdev = vport->back;
6072         struct hclge_mac_vlan_tbl_entry_cmd req;
6073         enum hclge_cmd_status status;
6074         struct hclge_desc desc[3];
6075
6076         /* mac addr check */
6077         if (!is_multicast_ether_addr(addr)) {
6078                 dev_dbg(&hdev->pdev->dev,
6079                         "Remove mc mac err! invalid mac:%pM.\n",
6080                          addr);
6081                 return -EINVAL;
6082         }
6083
6084         memset(&req, 0, sizeof(req));
6085         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6086         hclge_prepare_mac_addr(&req, addr, true);
6087         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6088         if (!status) {
6089                 /* This mac addr exists, remove this handle's VFID for it */
6090                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6091
6092                 if (hclge_is_all_function_id_zero(desc))
6093                         /* All the vfids are zero, so this entry needs to be deleted */
6094                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6095                 else
6096                         /* Not all the vfids are zero, update the vfid */
6097                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6098
6099         } else {
6100                 /* This mac address may be in the mta table, but it cannot be
6101                  * deleted here because an mta entry represents an address
6102                  * range rather than a specific address. The delete action for
6103                  * all entries will take effect in update_mta_status, called by
6104                  * hns3_nic_set_rx_mode.
6105                  */
6106                 status = 0;
6107         }
6108
6109         return status;
6110 }
6111
6112 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6113                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6114 {
6115         struct hclge_vport_mac_addr_cfg *mac_cfg;
6116         struct list_head *list;
6117
6118         if (!vport->vport_id)
6119                 return;
6120
6121         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6122         if (!mac_cfg)
6123                 return;
6124
6125         mac_cfg->hd_tbl_status = true;
6126         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6127
6128         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6129                &vport->uc_mac_list : &vport->mc_mac_list;
6130
6131         list_add_tail(&mac_cfg->node, list);
6132 }
6133
6134 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6135                               bool is_write_tbl,
6136                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6137 {
6138         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6139         struct list_head *list;
6140         bool uc_flag, mc_flag;
6141
6142         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6143                &vport->uc_mac_list : &vport->mc_mac_list;
6144
6145         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6146         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6147
6148         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6149                 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6150                         if (uc_flag && mac_cfg->hd_tbl_status)
6151                                 hclge_rm_uc_addr_common(vport, mac_addr);
6152
6153                         if (mc_flag && mac_cfg->hd_tbl_status)
6154                                 hclge_rm_mc_addr_common(vport, mac_addr);
6155
6156                         list_del(&mac_cfg->node);
6157                         kfree(mac_cfg);
6158                         break;
6159                 }
6160         }
6161 }
6162
6163 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6164                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6165 {
6166         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6167         struct list_head *list;
6168
6169         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6170                &vport->uc_mac_list : &vport->mc_mac_list;
6171
6172         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6173                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6174                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6175
6176                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6177                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6178
6179                 mac_cfg->hd_tbl_status = false;
6180                 if (is_del_list) {
6181                         list_del(&mac_cfg->node);
6182                         kfree(mac_cfg);
6183                 }
6184         }
6185 }
6186
6187 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6188 {
6189         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6190         struct hclge_vport *vport;
6191         int i;
6192
6193         mutex_lock(&hdev->vport_cfg_mutex);
6194         for (i = 0; i < hdev->num_alloc_vport; i++) {
6195                 vport = &hdev->vport[i];
6196                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6197                         list_del(&mac->node);
6198                         kfree(mac);
6199                 }
6200
6201                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6202                         list_del(&mac->node);
6203                         kfree(mac);
6204                 }
6205         }
6206         mutex_unlock(&hdev->vport_cfg_mutex);
6207 }
6208
6209 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6210                                               u16 cmdq_resp, u8 resp_code)
6211 {
6212 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6213 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6214 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6215 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6216
6217         int return_status;
6218
6219         if (cmdq_resp) {
6220                 dev_err(&hdev->pdev->dev,
6221                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6222                         cmdq_resp);
6223                 return -EIO;
6224         }
6225
6226         switch (resp_code) {
6227         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6228         case HCLGE_ETHERTYPE_ALREADY_ADD:
6229                 return_status = 0;
6230                 break;
6231         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6232                 dev_err(&hdev->pdev->dev,
6233                         "add mac ethertype failed for manager table overflow.\n");
6234                 return_status = -EIO;
6235                 break;
6236         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6237                 dev_err(&hdev->pdev->dev,
6238                         "add mac ethertype failed for key conflict.\n");
6239                 return_status = -EIO;
6240                 break;
6241         default:
6242                 dev_err(&hdev->pdev->dev,
6243                         "add mac ethertype failed for undefined, code=%d.\n",
6244                         resp_code);
6245                 return_status = -EIO;
6246         }
6247
6248         return return_status;
6249 }
6250
6251 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6252                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6253 {
6254         struct hclge_desc desc;
6255         u8 resp_code;
6256         u16 retval;
6257         int ret;
6258
6259         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6260         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6261
6262         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6263         if (ret) {
6264                 dev_err(&hdev->pdev->dev,
6265                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6266                         ret);
6267                 return ret;
6268         }
6269
6270         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6271         retval = le16_to_cpu(desc.retval);
6272
6273         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6274 }
6275
6276 static int init_mgr_tbl(struct hclge_dev *hdev)
6277 {
6278         int ret;
6279         int i;
6280
6281         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6282                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6283                 if (ret) {
6284                         dev_err(&hdev->pdev->dev,
6285                                 "add mac ethertype failed, ret =%d.\n",
6286                                 ret);
6287                         return ret;
6288                 }
6289         }
6290
6291         return 0;
6292 }
6293
6294 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6295 {
6296         struct hclge_vport *vport = hclge_get_vport(handle);
6297         struct hclge_dev *hdev = vport->back;
6298
6299         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6300 }
6301
6302 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6303                               bool is_first)
6304 {
6305         const unsigned char *new_addr = (const unsigned char *)p;
6306         struct hclge_vport *vport = hclge_get_vport(handle);
6307         struct hclge_dev *hdev = vport->back;
6308         int ret;
6309
6310         /* mac addr check */
6311         if (is_zero_ether_addr(new_addr) ||
6312             is_broadcast_ether_addr(new_addr) ||
6313             is_multicast_ether_addr(new_addr)) {
6314                 dev_err(&hdev->pdev->dev,
6315                         "Change uc mac err! invalid mac:%pM.\n",
6316                          new_addr);
6317                 return -EINVAL;
6318         }
6319
6320         if ((!is_first || is_kdump_kernel()) &&
6321             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6322                 dev_warn(&hdev->pdev->dev,
6323                          "remove old uc mac address fail.\n");
6324
6325         ret = hclge_add_uc_addr(handle, new_addr);
6326         if (ret) {
6327                 dev_err(&hdev->pdev->dev,
6328                         "add uc mac address fail, ret =%d.\n",
6329                         ret);
6330
6331                 if (!is_first &&
6332                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6333                         dev_err(&hdev->pdev->dev,
6334                                 "restore uc mac address fail.\n");
6335
6336                 return -EIO;
6337         }
6338
6339         ret = hclge_pause_addr_cfg(hdev, new_addr);
6340         if (ret) {
6341                 dev_err(&hdev->pdev->dev,
6342                         "configure mac pause address fail, ret =%d.\n",
6343                         ret);
6344                 return -EIO;
6345         }
6346
6347         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6348
6349         return 0;
6350 }
6351
6352 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6353                           int cmd)
6354 {
6355         struct hclge_vport *vport = hclge_get_vport(handle);
6356         struct hclge_dev *hdev = vport->back;
6357
6358         if (!hdev->hw.mac.phydev)
6359                 return -EOPNOTSUPP;
6360
6361         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6362 }
6363
6364 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6365                                       u8 fe_type, bool filter_en, u8 vf_id)
6366 {
6367         struct hclge_vlan_filter_ctrl_cmd *req;
6368         struct hclge_desc desc;
6369         int ret;
6370
6371         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6372
6373         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6374         req->vlan_type = vlan_type;
6375         req->vlan_fe = filter_en ? fe_type : 0;
6376         req->vf_id = vf_id;
6377
6378         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6379         if (ret)
6380                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6381                         ret);
6382
6383         return ret;
6384 }
6385
6386 #define HCLGE_FILTER_TYPE_VF            0
6387 #define HCLGE_FILTER_TYPE_PORT          1
6388 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6389 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6390 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6391 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6392 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6393 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6394                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6395 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6396                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6397
6398 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6399 {
6400         struct hclge_vport *vport = hclge_get_vport(handle);
6401         struct hclge_dev *hdev = vport->back;
6402
6403         if (hdev->pdev->revision >= 0x21) {
6404                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6405                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
6406                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6407                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
6408         } else {
6409                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6410                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6411                                            0);
6412         }
6413         if (enable)
6414                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6415         else
6416                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6417 }
6418
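/* Program the VF vlan filter table for a single vlan id. The affected
 * function is selected by one bit in a bitmap spread over two command
 * descriptors, and the firmware response codes for "table full" and
 * "entry not found" are reported as warnings rather than errors.
 */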
6419 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6420                                     bool is_kill, u16 vlan, u8 qos,
6421                                     __be16 proto)
6422 {
6423 #define HCLGE_MAX_VF_BYTES  16
6424         struct hclge_vlan_filter_vf_cfg_cmd *req0;
6425         struct hclge_vlan_filter_vf_cfg_cmd *req1;
6426         struct hclge_desc desc[2];
6427         u8 vf_byte_val;
6428         u8 vf_byte_off;
6429         int ret;
6430
6431         hclge_cmd_setup_basic_desc(&desc[0],
6432                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6433         hclge_cmd_setup_basic_desc(&desc[1],
6434                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6435
6436         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6437
6438         vf_byte_off = vfid / 8;
6439         vf_byte_val = 1 << (vfid % 8);
6440
6441         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6442         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6443
6444         req0->vlan_id  = cpu_to_le16(vlan);
6445         req0->vlan_cfg = is_kill;
6446
6447         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6448                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6449         else
6450                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6451
6452         ret = hclge_cmd_send(&hdev->hw, desc, 2);
6453         if (ret) {
6454                 dev_err(&hdev->pdev->dev,
6455                         "Send vf vlan command fail, ret =%d.\n",
6456                         ret);
6457                 return ret;
6458         }
6459
6460         if (!is_kill) {
6461 #define HCLGE_VF_VLAN_NO_ENTRY  2
6462                 if (!req0->resp_code || req0->resp_code == 1)
6463                         return 0;
6464
6465                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6466                         dev_warn(&hdev->pdev->dev,
6467                                  "vf vlan table is full, vf vlan filter is disabled\n");
6468                         return 0;
6469                 }
6470
6471                 dev_err(&hdev->pdev->dev,
6472                         "Add vf vlan filter fail, ret =%d.\n",
6473                         req0->resp_code);
6474         } else {
6475 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
6476                 if (!req0->resp_code)
6477                         return 0;
6478
6479                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6480                         dev_warn(&hdev->pdev->dev,
6481                                  "vlan %d filter is not in vf vlan table\n",
6482                                  vlan);
6483                         return 0;
6484                 }
6485
6486                 dev_err(&hdev->pdev->dev,
6487                         "Kill vf vlan filter fail, ret =%d.\n",
6488                         req0->resp_code);
6489         }
6490
6491         return -EIO;
6492 }
6493
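/* Program the port vlan filter for one vlan id: the table is addressed
 * in blocks of 160 vlans, so vlan_offset selects the block and a single
 * bit in vlan_offset_bitmap selects the vlan within it.
 */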
6494 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6495                                       u16 vlan_id, bool is_kill)
6496 {
6497         struct hclge_vlan_filter_pf_cfg_cmd *req;
6498         struct hclge_desc desc;
6499         u8 vlan_offset_byte_val;
6500         u8 vlan_offset_byte;
6501         u8 vlan_offset_160;
6502         int ret;
6503
6504         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6505
6506         vlan_offset_160 = vlan_id / 160;
6507         vlan_offset_byte = (vlan_id % 160) / 8;
6508         vlan_offset_byte_val = 1 << (vlan_id % 8);
6509
6510         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6511         req->vlan_offset = vlan_offset_160;
6512         req->vlan_cfg = is_kill;
6513         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6514
6515         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6516         if (ret)
6517                 dev_err(&hdev->pdev->dev,
6518                         "port vlan command, send fail, ret =%d.\n", ret);
6519         return ret;
6520 }
6521
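/* Apply a vlan add/kill for @vport_id: per-vport membership is tracked
 * in hdev->vlan_table, and the port vlan filter is only updated when the
 * first vport joins or the last vport leaves the vlan.
 */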
6522 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6523                                     u16 vport_id, u16 vlan_id, u8 qos,
6524                                     bool is_kill)
6525 {
6526         u16 vport_idx, vport_num = 0;
6527         int ret;
6528
6529         if (is_kill && !vlan_id)
6530                 return 0;
6531
6532         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6533                                        0, proto);
6534         if (ret) {
6535                 dev_err(&hdev->pdev->dev,
6536                         "Set %d vport vlan filter config fail, ret =%d.\n",
6537                         vport_id, ret);
6538                 return ret;
6539         }
6540
6541         /* vlan 0 may be added twice when 8021q module is enabled */
6542         if (!is_kill && !vlan_id &&
6543             test_bit(vport_id, hdev->vlan_table[vlan_id]))
6544                 return 0;
6545
6546         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6547                 dev_err(&hdev->pdev->dev,
6548                         "Add port vlan failed, vport %d is already in vlan %d\n",
6549                         vport_id, vlan_id);
6550                 return -EINVAL;
6551         }
6552
6553         if (is_kill &&
6554             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6555                 dev_err(&hdev->pdev->dev,
6556                         "Delete port vlan failed, vport %d is not in vlan %d\n",
6557                         vport_id, vlan_id);
6558                 return -EINVAL;
6559         }
6560
6561         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6562                 vport_num++;
6563
6564         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6565                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6566                                                  is_kill);
6567
6568         return ret;
6569 }
6570
6571 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6572                           u16 vlan_id, bool is_kill)
6573 {
6574         struct hclge_vport *vport = hclge_get_vport(handle);
6575         struct hclge_dev *hdev = vport->back;
6576
6577         return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6578                                         0, is_kill);
6579 }
6580
6581 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6582                                     u16 vlan, u8 qos, __be16 proto)
6583 {
6584         struct hclge_vport *vport = hclge_get_vport(handle);
6585         struct hclge_dev *hdev = vport->back;
6586
6587         if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6588                 return -EINVAL;
6589         if (proto != htons(ETH_P_8021Q))
6590                 return -EPROTONOSUPPORT;
6591
6592         return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6593 }
6594
6595 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6596 {
6597         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6598         struct hclge_vport_vtag_tx_cfg_cmd *req;
6599         struct hclge_dev *hdev = vport->back;
6600         struct hclge_desc desc;
6601         int status;
6602
6603         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6604
6605         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6606         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6607         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6608         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6609                       vcfg->accept_tag1 ? 1 : 0);
6610         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6611                       vcfg->accept_untag1 ? 1 : 0);
6612         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6613                       vcfg->accept_tag2 ? 1 : 0);
6614         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6615                       vcfg->accept_untag2 ? 1 : 0);
6616         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6617                       vcfg->insert_tag1_en ? 1 : 0);
6618         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6619                       vcfg->insert_tag2_en ? 1 : 0);
6620         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6621
6622         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6623         req->vf_bitmap[req->vf_offset] =
6624                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6625
6626         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6627         if (status)
6628                 dev_err(&hdev->pdev->dev,
6629                         "Send port txvlan cfg command fail, ret =%d\n",
6630                         status);
6631
6632         return status;
6633 }
6634
6635 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6636 {
6637         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6638         struct hclge_vport_vtag_rx_cfg_cmd *req;
6639         struct hclge_dev *hdev = vport->back;
6640         struct hclge_desc desc;
6641         int status;
6642
6643         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6644
6645         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6646         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6647                       vcfg->strip_tag1_en ? 1 : 0);
6648         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6649                       vcfg->strip_tag2_en ? 1 : 0);
6650         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6651                       vcfg->vlan1_vlan_prionly ? 1 : 0);
6652         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6653                       vcfg->vlan2_vlan_prionly ? 1 : 0);
6654
6655         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6656         req->vf_bitmap[req->vf_offset] =
6657                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6658
6659         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6660         if (status)
6661                 dev_err(&hdev->pdev->dev,
6662                         "Send port rxvlan cfg command fail, ret =%d\n",
6663                         status);
6664
6665         return status;
6666 }
6667
6668 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6669 {
6670         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6671         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6672         struct hclge_desc desc;
6673         int status;
6674
6675         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6676         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6677         rx_req->ot_fst_vlan_type =
6678                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6679         rx_req->ot_sec_vlan_type =
6680                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6681         rx_req->in_fst_vlan_type =
6682                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6683         rx_req->in_sec_vlan_type =
6684                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6685
6686         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6687         if (status) {
6688                 dev_err(&hdev->pdev->dev,
6689                         "Send rxvlan protocol type command fail, ret =%d\n",
6690                         status);
6691                 return status;
6692         }
6693
6694         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6695
6696         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6697         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6698         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6699
6700         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6701         if (status)
6702                 dev_err(&hdev->pdev->dev,
6703                         "Send txvlan protocol type command fail, ret =%d\n",
6704                         status);
6705
6706         return status;
6707 }
6708
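/* Initial vlan setup: enable vlan filtering (per-function on revision
 * 0x21 and later), set the default 0x8100 vlan protocol types, configure
 * tx/rx vlan tag offload for every vport and finally add vlan 0.
 */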
6709 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6710 {
6711 #define HCLGE_DEF_VLAN_TYPE             0x8100
6712
6713         struct hnae3_handle *handle = &hdev->vport[0].nic;
6714         struct hclge_vport *vport;
6715         int ret;
6716         int i;
6717
6718         if (hdev->pdev->revision >= 0x21) {
6719                 /* for revision 0x21, vf vlan filter is per function */
6720                 for (i = 0; i < hdev->num_alloc_vport; i++) {
6721                         vport = &hdev->vport[i];
6722                         ret = hclge_set_vlan_filter_ctrl(hdev,
6723                                                          HCLGE_FILTER_TYPE_VF,
6724                                                          HCLGE_FILTER_FE_EGRESS,
6725                                                          true,
6726                                                          vport->vport_id);
6727                         if (ret)
6728                                 return ret;
6729                 }
6730
6731                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6732                                                  HCLGE_FILTER_FE_INGRESS, true,
6733                                                  0);
6734                 if (ret)
6735                         return ret;
6736         } else {
6737                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6738                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
6739                                                  true, 0);
6740                 if (ret)
6741                         return ret;
6742         }
6743
6744         handle->netdev_flags |= HNAE3_VLAN_FLTR;
6745
6746         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6747         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6748         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6749         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6750         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6751         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6752
6753         ret = hclge_set_vlan_protocol_type(hdev);
6754         if (ret)
6755                 return ret;
6756
6757         for (i = 0; i < hdev->num_alloc_vport; i++) {
6758                 vport = &hdev->vport[i];
6759                 vport->txvlan_cfg.accept_tag1 = true;
6760                 vport->txvlan_cfg.accept_untag1 = true;
6761
6762                 /* accept_tag2 and accept_untag2 are not supported on
6763                  * pdev revision(0x20); newer revisions support them. The
6764                  * values of these two fields will not cause an error when the
6765                  * driver sends the command to the firmware on revision(0x20).
6766                  * These two fields cannot be configured by the user.
6767                  */
6768                 vport->txvlan_cfg.accept_tag2 = true;
6769                 vport->txvlan_cfg.accept_untag2 = true;
6770
6771                 vport->txvlan_cfg.insert_tag1_en = false;
6772                 vport->txvlan_cfg.insert_tag2_en = false;
6773                 vport->txvlan_cfg.default_tag1 = 0;
6774                 vport->txvlan_cfg.default_tag2 = 0;
6775
6776                 ret = hclge_set_vlan_tx_offload_cfg(vport);
6777                 if (ret)
6778                         return ret;
6779
6780                 vport->rxvlan_cfg.strip_tag1_en = false;
6781                 vport->rxvlan_cfg.strip_tag2_en = true;
6782                 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6783                 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6784
6785                 ret = hclge_set_vlan_rx_offload_cfg(vport);
6786                 if (ret)
6787                         return ret;
6788         }
6789
6790         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6791 }
6792
6793 void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
6794 {
6795         struct hclge_vport_vlan_cfg *vlan;
6796
6797         /* vlan 0 is reserved */
6798         if (!vlan_id)
6799                 return;
6800
6801         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6802         if (!vlan)
6803                 return;
6804
6805         vlan->hd_tbl_status = true;
6806         vlan->vlan_id = vlan_id;
6807
6808         list_add_tail(&vlan->node, &vport->vlan_list);
6809 }
6810
6811 void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6812                                bool is_write_tbl)
6813 {
6814         struct hclge_vport_vlan_cfg *vlan, *tmp;
6815         struct hclge_dev *hdev = vport->back;
6816
6817         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6818                 if (vlan->vlan_id == vlan_id) {
6819                         if (is_write_tbl && vlan->hd_tbl_status)
6820                                 hclge_set_vlan_filter_hw(hdev,
6821                                                          htons(ETH_P_8021Q),
6822                                                          vport->vport_id,
6823                                                          vlan_id, 0,
6824                                                          true);
6825
6826                         list_del(&vlan->node);
6827                         kfree(vlan);
6828                         break;
6829                 }
6830         }
6831 }
6832
6833 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6834 {
6835         struct hclge_vport_vlan_cfg *vlan, *tmp;
6836         struct hclge_dev *hdev = vport->back;
6837
6838         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6839                 if (vlan->hd_tbl_status)
6840                         hclge_set_vlan_filter_hw(hdev,
6841                                                  htons(ETH_P_8021Q),
6842                                                  vport->vport_id,
6843                                                  vlan->vlan_id, 0,
6844                                                  true);
6845
6846                 vlan->hd_tbl_status = false;
6847                 if (is_del_list) {
6848                         list_del(&vlan->node);
6849                         kfree(vlan);
6850                 }
6851         }
6852 }
6853
6854 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6855 {
6856         struct hclge_vport_vlan_cfg *vlan, *tmp;
6857         struct hclge_vport *vport;
6858         int i;
6859
6860         mutex_lock(&hdev->vport_cfg_mutex);
6861         for (i = 0; i < hdev->num_alloc_vport; i++) {
6862                 vport = &hdev->vport[i];
6863                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6864                         list_del(&vlan->node);
6865                         kfree(vlan);
6866                 }
6867         }
6868         mutex_unlock(&hdev->vport_cfg_mutex);
6869 }
6870
6871 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6872 {
6873         struct hclge_vport *vport = hclge_get_vport(handle);
6874
6875         vport->rxvlan_cfg.strip_tag1_en = false;
6876         vport->rxvlan_cfg.strip_tag2_en = enable;
6877         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6878         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6879
6880         return hclge_set_vlan_rx_offload_cfg(vport);
6881 }
6882
6883 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6884 {
6885         struct hclge_config_max_frm_size_cmd *req;
6886         struct hclge_desc desc;
6887
6888         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6889
6890         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6891         req->max_frm_size = cpu_to_le16(new_mps);
6892         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6893
6894         return hclge_cmd_send(&hdev->hw, &desc, 1);
6895 }
6896
6897 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6898 {
6899         struct hclge_vport *vport = hclge_get_vport(handle);
6900
6901         return hclge_set_vport_mtu(vport, new_mtu);
6902 }
6903
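/* Convert the requested MTU into a max frame size (MTU plus Ethernet
 * header, FCS and two vlan tags; e.g. an MTU of 1500 gives
 * 1500 + 14 + 4 + 8 = 1526 bytes), validate it against the PF/VF limits
 * and, for the PF, reprogram the MAC max frame size and buffer allocation.
 */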
6904 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6905 {
6906         struct hclge_dev *hdev = vport->back;
6907         int i, max_frm_size, ret = 0;
6908
6909         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6910         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6911             max_frm_size > HCLGE_MAC_MAX_FRAME)
6912                 return -EINVAL;
6913
6914         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6915         mutex_lock(&hdev->vport_lock);
6916         /* VF's mps must fit within hdev->mps */
6917         if (vport->vport_id && max_frm_size > hdev->mps) {
6918                 mutex_unlock(&hdev->vport_lock);
6919                 return -EINVAL;
6920         } else if (vport->vport_id) {
6921                 vport->mps = max_frm_size;
6922                 mutex_unlock(&hdev->vport_lock);
6923                 return 0;
6924         }
6925
6926         /* PF's mps must be greater than or equal to each VF's mps */
6927         for (i = 1; i < hdev->num_alloc_vport; i++)
6928                 if (max_frm_size < hdev->vport[i].mps) {
6929                         mutex_unlock(&hdev->vport_lock);
6930                         return -EINVAL;
6931                 }
6932
6933         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6934
6935         ret = hclge_set_mac_mtu(hdev, max_frm_size);
6936         if (ret) {
6937                 dev_err(&hdev->pdev->dev,
6938                         "Change mtu fail, ret =%d\n", ret);
6939                 goto out;
6940         }
6941
6942         hdev->mps = max_frm_size;
6943         vport->mps = max_frm_size;
6944
6945         ret = hclge_buffer_alloc(hdev);
6946         if (ret)
6947                 dev_err(&hdev->pdev->dev,
6948                         "Allocate buffer fail, ret =%d\n", ret);
6949
6950 out:
6951         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6952         mutex_unlock(&hdev->vport_lock);
6953         return ret;
6954 }
6955
6956 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6957                                     bool enable)
6958 {
6959         struct hclge_reset_tqp_queue_cmd *req;
6960         struct hclge_desc desc;
6961         int ret;
6962
6963         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6964
6965         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6966         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6967         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6968
6969         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6970         if (ret) {
6971                 dev_err(&hdev->pdev->dev,
6972                         "Send tqp reset cmd error, status =%d\n", ret);
6973                 return ret;
6974         }
6975
6976         return 0;
6977 }
6978
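/* Query the reset state of the TQP identified by queue_id. Returns the
 * HCLGE_TQP_RESET_B bit from the response (treated by callers as "the
 * hardware reset has taken effect"), or a negative errno if the command
 * fails.
 */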
6979 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6980 {
6981         struct hclge_reset_tqp_queue_cmd *req;
6982         struct hclge_desc desc;
6983         int ret;
6984
6985         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6986
6987         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6988         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6989
6990         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6991         if (ret) {
6992                 dev_err(&hdev->pdev->dev,
6993                         "Get reset status error, status =%d\n", ret);
6994                 return ret;
6995         }
6996
6997         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6998 }
6999
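/* Convert a queue id that is local to the handle into the global TQP
 * index used by the hardware.
 */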
7000 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7001 {
7002         struct hnae3_queue *queue;
7003         struct hclge_tqp *tqp;
7004
7005         queue = handle->kinfo.tqp[queue_id];
7006         tqp = container_of(queue, struct hclge_tqp, q);
7007
7008         return tqp->index;
7009 }
7010
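/* Reset a single TQP: disable it, assert the hardware reset, poll the
 * reset status up to HCLGE_TQP_RESET_TRY_TIMES times, then deassert the
 * reset again.
 */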
7011 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7012 {
7013         struct hclge_vport *vport = hclge_get_vport(handle);
7014         struct hclge_dev *hdev = vport->back;
7015         int reset_try_times = 0;
7016         int reset_status;
7017         u16 queue_gid;
7018         int ret = 0;
7019
7020         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7021
7022         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7023         if (ret) {
7024                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7025                 return ret;
7026         }
7027
7028         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7029         if (ret) {
7030                 dev_err(&hdev->pdev->dev,
7031                         "Send reset tqp cmd fail, ret = %d\n", ret);
7032                 return ret;
7033         }
7034
7035         reset_try_times = 0;
7036         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7037                 /* Wait for tqp hw reset */
7038                 msleep(20);
7039                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7040                 if (reset_status)
7041                         break;
7042         }
7043
7044         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7045                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7046                 return -ETIME;
7047         }
7048
7049         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7050         if (ret)
7051                 dev_err(&hdev->pdev->dev,
7052                         "Deassert the soft reset fail, ret = %d\n", ret);
7053
7054         return ret;
7055 }
7056
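/* Reset a TQP on behalf of a VF. Same sequence as hclge_reset_tqp(), but
 * failures are only logged because there is no return value to pass back
 * to the VF here.
 */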
7057 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7058 {
7059         struct hclge_dev *hdev = vport->back;
7060         int reset_try_times = 0;
7061         int reset_status;
7062         u16 queue_gid;
7063         int ret;
7064
7065         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7066
7067         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7068         if (ret) {
7069                 dev_warn(&hdev->pdev->dev,
7070                          "Send reset tqp cmd fail, ret = %d\n", ret);
7071                 return;
7072         }
7073
7074         reset_try_times = 0;
7075         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7076                 /* Wait for tqp hw reset */
7077                 msleep(20);
7078                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7079                 if (reset_status)
7080                         break;
7081         }
7082
7083         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7084                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7085                 return;
7086         }
7087
7088         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7089         if (ret)
7090                 dev_warn(&hdev->pdev->dev,
7091                          "Deassert the soft reset fail, ret = %d\n", ret);
7092 }
7093
7094 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7095 {
7096         struct hclge_vport *vport = hclge_get_vport(handle);
7097         struct hclge_dev *hdev = vport->back;
7098
7099         return hdev->fw_version;
7100 }
7101
7102 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7103 {
7104         struct phy_device *phydev = hdev->hw.mac.phydev;
7105
7106         if (!phydev)
7107                 return;
7108
7109         phy_set_asym_pause(phydev, rx_en, tx_en);
7110 }
7111
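/* Record the requested pause configuration and program the MAC pause
 * enables. If PFC is currently active, only the last-requested mode is
 * remembered and the MAC is left untouched.
 */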
7112 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7113 {
7114         int ret;
7115
7116         if (rx_en && tx_en)
7117                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7118         else if (rx_en && !tx_en)
7119                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7120         else if (!rx_en && tx_en)
7121                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7122         else
7123                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7124
7125         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7126                 return 0;
7127
7128         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7129         if (ret) {
7130                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7131                         ret);
7132                 return ret;
7133         }
7134
7135         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7136
7137         return 0;
7138 }
7139
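/* Resolve the pause mode negotiated between the local PHY advertisement
 * and the link partner, and apply it to the MAC via
 * hclge_cfg_pauseparam().
 */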
7140 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7141 {
7142         struct phy_device *phydev = hdev->hw.mac.phydev;
7143         u16 remote_advertising = 0;
7144         u16 local_advertising = 0;
7145         u32 rx_pause, tx_pause;
7146         u8 flowctl;
7147
7148         if (!phydev->link || !phydev->autoneg)
7149                 return 0;
7150
7151         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7152
7153         if (phydev->pause)
7154                 remote_advertising = LPA_PAUSE_CAP;
7155
7156         if (phydev->asym_pause)
7157                 remote_advertising |= LPA_PAUSE_ASYM;
7158
7159         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7160                                            remote_advertising);
7161         tx_pause = flowctl & FLOW_CTRL_TX;
7162         rx_pause = flowctl & FLOW_CTRL_RX;
7163
7164         if (phydev->duplex == HCLGE_MAC_HALF) {
7165                 tx_pause = 0;
7166                 rx_pause = 0;
7167         }
7168
7169         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7170 }
7171
7172 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7173                                  u32 *rx_en, u32 *tx_en)
7174 {
7175         struct hclge_vport *vport = hclge_get_vport(handle);
7176         struct hclge_dev *hdev = vport->back;
7177
7178         *auto_neg = hclge_get_autoneg(handle);
7179
7180         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7181                 *rx_en = 0;
7182                 *tx_en = 0;
7183                 return;
7184         }
7185
7186         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7187                 *rx_en = 1;
7188                 *tx_en = 0;
7189         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7190                 *tx_en = 1;
7191                 *rx_en = 0;
7192         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7193                 *rx_en = 1;
7194                 *tx_en = 1;
7195         } else {
7196                 *rx_en = 0;
7197                 *tx_en = 0;
7198         }
7199 }
7200
7201 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7202                                 u32 rx_en, u32 tx_en)
7203 {
7204         struct hclge_vport *vport = hclge_get_vport(handle);
7205         struct hclge_dev *hdev = vport->back;
7206         struct phy_device *phydev = hdev->hw.mac.phydev;
7207         u32 fc_autoneg;
7208
7209         fc_autoneg = hclge_get_autoneg(handle);
7210         if (auto_neg != fc_autoneg) {
7211                 dev_info(&hdev->pdev->dev,
7212                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7213                 return -EOPNOTSUPP;
7214         }
7215
7216         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7217                 dev_info(&hdev->pdev->dev,
7218                          "Priority flow control enabled. Cannot set link flow control.\n");
7219                 return -EOPNOTSUPP;
7220         }
7221
7222         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7223
7224         if (!fc_autoneg)
7225                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7226
7227         /* Only support flow control negotiation for netdev with
7228          * phy attached for now.
7229          */
7230         if (!phydev)
7231                 return -EOPNOTSUPP;
7232
7233         return phy_start_aneg(phydev);
7234 }
7235
7236 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7237                                           u8 *auto_neg, u32 *speed, u8 *duplex)
7238 {
7239         struct hclge_vport *vport = hclge_get_vport(handle);
7240         struct hclge_dev *hdev = vport->back;
7241
7242         if (speed)
7243                 *speed = hdev->hw.mac.speed;
7244         if (duplex)
7245                 *duplex = hdev->hw.mac.duplex;
7246         if (auto_neg)
7247                 *auto_neg = hdev->hw.mac.autoneg;
7248 }
7249
7250 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7251 {
7252         struct hclge_vport *vport = hclge_get_vport(handle);
7253         struct hclge_dev *hdev = vport->back;
7254
7255         if (media_type)
7256                 *media_type = hdev->hw.mac.media_type;
7257 }
7258
7259 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7260                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7261 {
7262         struct hclge_vport *vport = hclge_get_vport(handle);
7263         struct hclge_dev *hdev = vport->back;
7264         struct phy_device *phydev = hdev->hw.mac.phydev;
7265         int mdix_ctrl, mdix, retval, is_resolved;
7266
7267         if (!phydev) {
7268                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7269                 *tp_mdix = ETH_TP_MDI_INVALID;
7270                 return;
7271         }
7272
7273         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7274
7275         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7276         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7277                                     HCLGE_PHY_MDIX_CTRL_S);
7278
7279         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7280         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7281         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7282
7283         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7284
7285         switch (mdix_ctrl) {
7286         case 0x0:
7287                 *tp_mdix_ctrl = ETH_TP_MDI;
7288                 break;
7289         case 0x1:
7290                 *tp_mdix_ctrl = ETH_TP_MDI_X;
7291                 break;
7292         case 0x3:
7293                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7294                 break;
7295         default:
7296                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7297                 break;
7298         }
7299
7300         if (!is_resolved)
7301                 *tp_mdix = ETH_TP_MDI_INVALID;
7302         else if (mdix)
7303                 *tp_mdix = ETH_TP_MDI_X;
7304         else
7305                 *tp_mdix = ETH_TP_MDI;
7306 }
7307
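/* Bind a client (KNIC, UNIC or RoCE) to every vport and initialize its
 * per-vport instance. The RoCE instance is only initialized once both
 * the NIC and RoCE clients are registered and the device supports RoCE.
 */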
7308 static int hclge_init_client_instance(struct hnae3_client *client,
7309                                       struct hnae3_ae_dev *ae_dev)
7310 {
7311         struct hclge_dev *hdev = ae_dev->priv;
7312         struct hclge_vport *vport;
7313         int i, ret;
7314
7315         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
7316                 vport = &hdev->vport[i];
7317
7318                 switch (client->type) {
7319                 case HNAE3_CLIENT_KNIC:
7320
7321                         hdev->nic_client = client;
7322                         vport->nic.client = client;
7323                         ret = client->ops->init_instance(&vport->nic);
7324                         if (ret)
7325                                 goto clear_nic;
7326
7327                         hnae3_set_client_init_flag(client, ae_dev, 1);
7328
7329                         if (hdev->roce_client &&
7330                             hnae3_dev_roce_supported(hdev)) {
7331                                 struct hnae3_client *rc = hdev->roce_client;
7332
7333                                 ret = hclge_init_roce_base_info(vport);
7334                                 if (ret)
7335                                         goto clear_roce;
7336
7337                                 ret = rc->ops->init_instance(&vport->roce);
7338                                 if (ret)
7339                                         goto clear_roce;
7340
7341                                 hnae3_set_client_init_flag(hdev->roce_client,
7342                                                            ae_dev, 1);
7343                         }
7344
7345                         break;
7346                 case HNAE3_CLIENT_UNIC:
7347                         hdev->nic_client = client;
7348                         vport->nic.client = client;
7349
7350                         ret = client->ops->init_instance(&vport->nic);
7351                         if (ret)
7352                                 goto clear_nic;
7353
7354                         hnae3_set_client_init_flag(client, ae_dev, 1);
7355
7356                         break;
7357                 case HNAE3_CLIENT_ROCE:
7358                         if (hnae3_dev_roce_supported(hdev)) {
7359                                 hdev->roce_client = client;
7360                                 vport->roce.client = client;
7361                         }
7362
7363                         if (hdev->roce_client && hdev->nic_client) {
7364                                 ret = hclge_init_roce_base_info(vport);
7365                                 if (ret)
7366                                         goto clear_roce;
7367
7368                                 ret = client->ops->init_instance(&vport->roce);
7369                                 if (ret)
7370                                         goto clear_roce;
7371
7372                                 hnae3_set_client_init_flag(client, ae_dev, 1);
7373                         }
7374
7375                         break;
7376                 default:
7377                         return -EINVAL;
7378                 }
7379         }
7380
7381         return 0;
7382
7383 clear_nic:
7384         hdev->nic_client = NULL;
7385         vport->nic.client = NULL;
7386         return ret;
7387 clear_roce:
7388         hdev->roce_client = NULL;
7389         vport->roce.client = NULL;
7390         return ret;
7391 }
7392
7393 static void hclge_uninit_client_instance(struct hnae3_client *client,
7394                                          struct hnae3_ae_dev *ae_dev)
7395 {
7396         struct hclge_dev *hdev = ae_dev->priv;
7397         struct hclge_vport *vport;
7398         int i;
7399
7400         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7401                 vport = &hdev->vport[i];
7402                 if (hdev->roce_client) {
7403                         hdev->roce_client->ops->uninit_instance(&vport->roce,
7404                                                                 0);
7405                         hdev->roce_client = NULL;
7406                         vport->roce.client = NULL;
7407                 }
7408                 if (client->type == HNAE3_CLIENT_ROCE)
7409                         return;
7410                 if (hdev->nic_client && client->ops->uninit_instance) {
7411                         client->ops->uninit_instance(&vport->nic, 0);
7412                         hdev->nic_client = NULL;
7413                         vport->nic.client = NULL;
7414                 }
7415         }
7416 }
7417
7418 static int hclge_pci_init(struct hclge_dev *hdev)
7419 {
7420         struct pci_dev *pdev = hdev->pdev;
7421         struct hclge_hw *hw;
7422         int ret;
7423
7424         ret = pci_enable_device(pdev);
7425         if (ret) {
7426                 dev_err(&pdev->dev, "failed to enable PCI device\n");
7427                 return ret;
7428         }
7429
7430         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7431         if (ret) {
7432                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7433                 if (ret) {
7434                         dev_err(&pdev->dev,
7435                                 "can't set consistent PCI DMA\n");
7436                         goto err_disable_device;
7437                 }
7438                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7439         }
7440
7441         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7442         if (ret) {
7443                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7444                 goto err_disable_device;
7445         }
7446
7447         pci_set_master(pdev);
7448         hw = &hdev->hw;
7449         hw->io_base = pcim_iomap(pdev, 2, 0);
7450         if (!hw->io_base) {
7451                 dev_err(&pdev->dev, "Can't map configuration register space\n");
7452                 ret = -ENOMEM;
7453                 goto err_clr_master;
7454         }
7455
7456         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7457
7458         return 0;
7459 err_clr_master:
7460         pci_clear_master(pdev);
7461         pci_release_regions(pdev);
7462 err_disable_device:
7463         pci_disable_device(pdev);
7464
7465         return ret;
7466 }
7467
7468 static void hclge_pci_uninit(struct hclge_dev *hdev)
7469 {
7470         struct pci_dev *pdev = hdev->pdev;
7471
7472         pcim_iounmap(pdev, hdev->hw.io_base);
7473         pci_free_irq_vectors(pdev);
7474         pci_clear_master(pdev);
7475         pci_release_mem_regions(pdev);
7476         pci_disable_device(pdev);
7477 }
7478
7479 static void hclge_state_init(struct hclge_dev *hdev)
7480 {
7481         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7482         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7483         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7484         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7485         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7486         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7487 }
7488
7489 static void hclge_state_uninit(struct hclge_dev *hdev)
7490 {
7491         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7492
7493         if (hdev->service_timer.function)
7494                 del_timer_sync(&hdev->service_timer);
7495         if (hdev->reset_timer.function)
7496                 del_timer_sync(&hdev->reset_timer);
7497         if (hdev->service_task.func)
7498                 cancel_work_sync(&hdev->service_task);
7499         if (hdev->rst_service_task.func)
7500                 cancel_work_sync(&hdev->rst_service_task);
7501         if (hdev->mbx_service_task.func)
7502                 cancel_work_sync(&hdev->mbx_service_task);
7503 }
7504
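/* Prepare for an FLR: request a function-level reset through
 * hclge_reset_event() and poll, up to HCLGE_FLR_WAIT_CNT times with
 * HCLGE_FLR_WAIT_MS sleeps, for the reset path to bring the device down.
 */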
7505 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7506 {
7507 #define HCLGE_FLR_WAIT_MS       100
7508 #define HCLGE_FLR_WAIT_CNT      50
7509         struct hclge_dev *hdev = ae_dev->priv;
7510         int cnt = 0;
7511
7512         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7513         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7514         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7515         hclge_reset_event(hdev->pdev, NULL);
7516
7517         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7518                cnt++ < HCLGE_FLR_WAIT_CNT)
7519                 msleep(HCLGE_FLR_WAIT_MS);
7520
7521         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7522                 dev_err(&hdev->pdev->dev,
7523                         "flr wait down timeout: %d\n", cnt);
7524 }
7525
7526 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7527 {
7528         struct hclge_dev *hdev = ae_dev->priv;
7529
7530         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7531 }
7532
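/* One-time PF initialization: PCI and command queue bring-up, capability
 * query, MSI and misc IRQ setup, TQP/vport allocation and mapping, MAC,
 * VLAN, TM, RSS and flow director configuration, and finally the service
 * timers and work items.
 */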
7533 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7534 {
7535         struct pci_dev *pdev = ae_dev->pdev;
7536         struct hclge_dev *hdev;
7537         int ret;
7538
7539         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7540         if (!hdev) {
7541                 ret = -ENOMEM;
7542                 goto out;
7543         }
7544
7545         hdev->pdev = pdev;
7546         hdev->ae_dev = ae_dev;
7547         hdev->reset_type = HNAE3_NONE_RESET;
7548         hdev->reset_level = HNAE3_FUNC_RESET;
7549         ae_dev->priv = hdev;
7550         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7551
7552         mutex_init(&hdev->vport_lock);
7553         mutex_init(&hdev->vport_cfg_mutex);
7554
7555         ret = hclge_pci_init(hdev);
7556         if (ret) {
7557                 dev_err(&pdev->dev, "PCI init failed\n");
7558                 goto out;
7559         }
7560
7561         /* Initialize the firmware command queue */
7562         ret = hclge_cmd_queue_init(hdev);
7563         if (ret) {
7564                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7565                 goto err_pci_uninit;
7566         }
7567
7568         /* Initialize firmware command handling */
7569         ret = hclge_cmd_init(hdev);
7570         if (ret)
7571                 goto err_cmd_uninit;
7572
7573         ret = hclge_get_cap(hdev);
7574         if (ret) {
7575                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7576                         ret);
7577                 goto err_cmd_uninit;
7578         }
7579
7580         ret = hclge_configure(hdev);
7581         if (ret) {
7582                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7583                 goto err_cmd_uninit;
7584         }
7585
7586         ret = hclge_init_msi(hdev);
7587         if (ret) {
7588                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7589                 goto err_cmd_uninit;
7590         }
7591
7592         ret = hclge_misc_irq_init(hdev);
7593         if (ret) {
7594                 dev_err(&pdev->dev,
7595                         "Misc IRQ(vector0) init error, ret = %d.\n",
7596                         ret);
7597                 goto err_msi_uninit;
7598         }
7599
7600         ret = hclge_alloc_tqps(hdev);
7601         if (ret) {
7602                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7603                 goto err_msi_irq_uninit;
7604         }
7605
7606         ret = hclge_alloc_vport(hdev);
7607         if (ret) {
7608                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7609                 goto err_msi_irq_uninit;
7610         }
7611
7612         ret = hclge_map_tqp(hdev);
7613         if (ret) {
7614                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7615                 goto err_msi_irq_uninit;
7616         }
7617
7618         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7619                 ret = hclge_mac_mdio_config(hdev);
7620                 if (ret) {
7621                         dev_err(&hdev->pdev->dev,
7622                                 "mdio config fail ret=%d\n", ret);
7623                         goto err_msi_irq_uninit;
7624                 }
7625         }
7626
7627         ret = hclge_init_umv_space(hdev);
7628         if (ret) {
7629                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7630                 goto err_mdiobus_unreg;
7631         }
7632
7633         ret = hclge_mac_init(hdev);
7634         if (ret) {
7635                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7636                 goto err_mdiobus_unreg;
7637         }
7638
7639         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7640         if (ret) {
7641                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7642                 goto err_mdiobus_unreg;
7643         }
7644
7645         ret = hclge_config_gro(hdev, true);
7646         if (ret)
7647                 goto err_mdiobus_unreg;
7648
7649         ret = hclge_init_vlan_config(hdev);
7650         if (ret) {
7651                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7652                 goto err_mdiobus_unreg;
7653         }
7654
7655         ret = hclge_tm_schd_init(hdev);
7656         if (ret) {
7657                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7658                 goto err_mdiobus_unreg;
7659         }
7660
7661         hclge_rss_init_cfg(hdev);
7662         ret = hclge_rss_init_hw(hdev);
7663         if (ret) {
7664                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7665                 goto err_mdiobus_unreg;
7666         }
7667
7668         ret = init_mgr_tbl(hdev);
7669         if (ret) {
7670                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7671                 goto err_mdiobus_unreg;
7672         }
7673
7674         ret = hclge_init_fd_config(hdev);
7675         if (ret) {
7676                 dev_err(&pdev->dev,
7677                         "fd table init fail, ret=%d\n", ret);
7678                 goto err_mdiobus_unreg;
7679         }
7680
7681         ret = hclge_hw_error_set_state(hdev, true);
7682         if (ret) {
7683                 dev_err(&pdev->dev,
7684                         "fail(%d) to enable hw error interrupts\n", ret);
7685                 goto err_mdiobus_unreg;
7686         }
7687
7688         hclge_dcb_ops_set(hdev);
7689
7690         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7691         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7692         INIT_WORK(&hdev->service_task, hclge_service_task);
7693         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7694         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7695
7696         hclge_clear_all_event_cause(hdev);
7697
7698         /* Enable MISC vector(vector0) */
7699         hclge_enable_vector(&hdev->misc_vector, true);
7700
7701         hclge_state_init(hdev);
7702         hdev->last_reset_time = jiffies;
7703
7704         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7705         return 0;
7706
7707 err_mdiobus_unreg:
7708         if (hdev->hw.mac.phydev)
7709                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
7710 err_msi_irq_uninit:
7711         hclge_misc_irq_uninit(hdev);
7712 err_msi_uninit:
7713         pci_free_irq_vectors(pdev);
7714 err_cmd_uninit:
7715         hclge_cmd_uninit(hdev);
7716 err_pci_uninit:
7717         pcim_iounmap(pdev, hdev->hw.io_base);
7718         pci_clear_master(pdev);
7719         pci_release_regions(pdev);
7720         pci_disable_device(pdev);
7721 out:
7722         return ret;
7723 }
7724
7725 static void hclge_stats_clear(struct hclge_dev *hdev)
7726 {
7727         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7728 }
7729
7730 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7731 {
7732         struct hclge_vport *vport = hdev->vport;
7733         int i;
7734
7735         for (i = 0; i < hdev->num_alloc_vport; i++) {
7736                 hclge_vport_stop(vport);
7737                 vport++;
7738         }
7739 }
7740
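/* Restore the hardware configuration after a reset. Unlike
 * hclge_init_ae_dev(), the software resources already allocated are
 * reused and only the hardware is reprogrammed.
 */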
7741 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7742 {
7743         struct hclge_dev *hdev = ae_dev->priv;
7744         struct pci_dev *pdev = ae_dev->pdev;
7745         int ret;
7746
7747         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7748
7749         hclge_stats_clear(hdev);
7750         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7751
7752         ret = hclge_cmd_init(hdev);
7753         if (ret) {
7754                 dev_err(&pdev->dev, "Cmd queue init failed\n");
7755                 return ret;
7756         }
7757
7758         ret = hclge_map_tqp(hdev);
7759         if (ret) {
7760                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7761                 return ret;
7762         }
7763
7764         hclge_reset_umv_space(hdev);
7765
7766         ret = hclge_mac_init(hdev);
7767         if (ret) {
7768                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7769                 return ret;
7770         }
7771
7772         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7773         if (ret) {
7774                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7775                 return ret;
7776         }
7777
7778         ret = hclge_config_gro(hdev, true);
7779         if (ret)
7780                 return ret;
7781
7782         ret = hclge_init_vlan_config(hdev);
7783         if (ret) {
7784                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7785                 return ret;
7786         }
7787
7788         ret = hclge_tm_init_hw(hdev, true);
7789         if (ret) {
7790                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7791                 return ret;
7792         }
7793
7794         ret = hclge_rss_init_hw(hdev);
7795         if (ret) {
7796                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7797                 return ret;
7798         }
7799
7800         ret = hclge_init_fd_config(hdev);
7801         if (ret) {
7802                 dev_err(&pdev->dev,
7803                         "fd table init fail, ret=%d\n", ret);
7804                 return ret;
7805         }
7806
7807         /* Re-enable the hw error interrupts because
7808          * the interrupts get disabled on core/global reset.
7809          */
7810         ret = hclge_hw_error_set_state(hdev, true);
7811         if (ret) {
7812                 dev_err(&pdev->dev,
7813                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
7814                 return ret;
7815         }
7816
7817         hclge_reset_vport_state(hdev);
7818
7819         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7820                  HCLGE_DRIVER_NAME);
7821
7822         return 0;
7823 }
7824
7825 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7826 {
7827         struct hclge_dev *hdev = ae_dev->priv;
7828         struct hclge_mac *mac = &hdev->hw.mac;
7829
7830         hclge_state_uninit(hdev);
7831
7832         if (mac->phydev)
7833                 mdiobus_unregister(mac->mdio_bus);
7834
7835         hclge_uninit_umv_space(hdev);
7836
7837         /* Disable MISC vector(vector0) */
7838         hclge_enable_vector(&hdev->misc_vector, false);
7839         synchronize_irq(hdev->misc_vector.vector_irq);
7840
7841         hclge_hw_error_set_state(hdev, false);
7842         hclge_cmd_uninit(hdev);
7843         hclge_misc_irq_uninit(hdev);
7844         hclge_pci_uninit(hdev);
7845         mutex_destroy(&hdev->vport_lock);
7846         hclge_uninit_vport_mac_table(hdev);
7847         hclge_uninit_vport_vlan_table(hdev);
7848         mutex_destroy(&hdev->vport_cfg_mutex);
7849         ae_dev->priv = NULL;
7850 }
7851
7852 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7853 {
7854         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7855         struct hclge_vport *vport = hclge_get_vport(handle);
7856         struct hclge_dev *hdev = vport->back;
7857
7858         return min_t(u32, hdev->rss_size_max,
7859                      vport->alloc_tqps / kinfo->num_tc);
7860 }
7861
7862 static void hclge_get_channels(struct hnae3_handle *handle,
7863                                struct ethtool_channels *ch)
7864 {
7865         ch->max_combined = hclge_get_max_channels(handle);
7866         ch->other_count = 1;
7867         ch->max_other = 1;
7868         ch->combined_count = handle->kinfo.rss_size;
7869 }
7870
7871 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7872                                         u16 *alloc_tqps, u16 *max_rss_size)
7873 {
7874         struct hclge_vport *vport = hclge_get_vport(handle);
7875         struct hclge_dev *hdev = vport->back;
7876
7877         *alloc_tqps = vport->alloc_tqps;
7878         *max_rss_size = hdev->rss_size_max;
7879 }
7880
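/* Change the number of queues (channels) used by the handle: update the
 * TM vport mapping, reprogram the RSS TC mode and, unless the user has
 * already configured one (rxfh_configured), rebuild the RSS indirection
 * table for the new rss_size.
 */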
7881 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
7882                               bool rxfh_configured)
7883 {
7884         struct hclge_vport *vport = hclge_get_vport(handle);
7885         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7886         struct hclge_dev *hdev = vport->back;
7887         int cur_rss_size = kinfo->rss_size;
7888         int cur_tqps = kinfo->num_tqps;
7889         u16 tc_offset[HCLGE_MAX_TC_NUM];
7890         u16 tc_valid[HCLGE_MAX_TC_NUM];
7891         u16 tc_size[HCLGE_MAX_TC_NUM];
7892         u16 roundup_size;
7893         u32 *rss_indir;
7894         int ret, i;
7895
7896         kinfo->req_rss_size = new_tqps_num;
7897
7898         ret = hclge_tm_vport_map_update(hdev);
7899         if (ret) {
7900                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
7901                 return ret;
7902         }
7903
7904         roundup_size = roundup_pow_of_two(kinfo->rss_size);
7905         roundup_size = ilog2(roundup_size);
7906         /* Set the RSS TC mode according to the new RSS size */
7907         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7908                 tc_valid[i] = 0;
7909
7910                 if (!(hdev->hw_tc_map & BIT(i)))
7911                         continue;
7912
7913                 tc_valid[i] = 1;
7914                 tc_size[i] = roundup_size;
7915                 tc_offset[i] = kinfo->rss_size * i;
7916         }
7917         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7918         if (ret)
7919                 return ret;
7920
7921         /* RSS indirection table has been configured by user */
7922         if (rxfh_configured)
7923                 goto out;
7924
7925         /* Reinitialize the RSS indirection table according to the new RSS size */
7926         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7927         if (!rss_indir)
7928                 return -ENOMEM;
7929
7930         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7931                 rss_indir[i] = i % kinfo->rss_size;
7932
7933         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7934         if (ret)
7935                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7936                         ret);
7937
7938         kfree(rss_indir);
7939
7940 out:
7941         if (!ret)
7942                 dev_info(&hdev->pdev->dev,
7943                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
7944                          cur_rss_size, kinfo->rss_size,
7945                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
7946
7947         return ret;
7948 }
7949
7950 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7951                               u32 *regs_num_64_bit)
7952 {
7953         struct hclge_desc desc;
7954         u32 total_num;
7955         int ret;
7956
7957         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7958         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7959         if (ret) {
7960                 dev_err(&hdev->pdev->dev,
7961                         "Query register number cmd failed, ret = %d.\n", ret);
7962                 return ret;
7963         }
7964
7965         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
7966         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
7967
7968         total_num = *regs_num_32_bit + *regs_num_64_bit;
7969         if (!total_num)
7970                 return -EINVAL;
7971
7972         return 0;
7973 }
7974
7975 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7976                                  void *data)
7977 {
7978 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7979
7980         struct hclge_desc *desc;
7981         u32 *reg_val = data;
7982         __le32 *desc_data;
7983         int cmd_num;
7984         int i, k, n;
7985         int ret;
7986
7987         if (regs_num == 0)
7988                 return 0;
7989
7990         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7991         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7992         if (!desc)
7993                 return -ENOMEM;
7994
7995         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7996         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7997         if (ret) {
7998                 dev_err(&hdev->pdev->dev,
7999                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8000                 kfree(desc);
8001                 return ret;
8002         }
8003
8004         for (i = 0; i < cmd_num; i++) {
8005                 if (i == 0) {
8006                         desc_data = (__le32 *)(&desc[i].data[0]);
8007                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8008                 } else {
8009                         desc_data = (__le32 *)(&desc[i]);
8010                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8011                 }
8012                 for (k = 0; k < n; k++) {
8013                         *reg_val++ = le32_to_cpu(*desc_data++);
8014
8015                         regs_num--;
8016                         if (!regs_num)
8017                                 break;
8018                 }
8019         }
8020
8021         kfree(desc);
8022         return 0;
8023 }
8024
8025 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8026                                  void *data)
8027 {
8028 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8029
8030         struct hclge_desc *desc;
8031         u64 *reg_val = data;
8032         __le64 *desc_data;
8033         int cmd_num;
8034         int i, k, n;
8035         int ret;
8036
8037         if (regs_num == 0)
8038                 return 0;
8039
8040         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8041         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8042         if (!desc)
8043                 return -ENOMEM;
8044
8045         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8046         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8047         if (ret) {
8048                 dev_err(&hdev->pdev->dev,
8049                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8050                 kfree(desc);
8051                 return ret;
8052         }
8053
8054         for (i = 0; i < cmd_num; i++) {
8055                 if (i == 0) {
8056                         desc_data = (__le64 *)(&desc[i].data[0]);
8057                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8058                 } else {
8059                         desc_data = (__le64 *)(&desc[i]);
8060                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8061                 }
8062                 for (k = 0; k < n; k++) {
8063                         *reg_val++ = le64_to_cpu(*desc_data++);
8064
8065                         regs_num--;
8066                         if (!regs_num)
8067                                 break;
8068                 }
8069         }
8070
8071         kfree(desc);
8072         return 0;
8073 }
8074
8075 #define MAX_SEPARATE_NUM        4
8076 #define SEPARATOR_VALUE         0xFFFFFFFF
8077 #define REG_NUM_PER_LINE        4
8078 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8079
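/* Return the buffer size, in bytes, needed by hclge_get_regs(): the
 * register lists dumped from PCIe space plus the 32 bit and 64 bit
 * registers queried from firmware, padded to REG_LEN_PER_LINE.
 */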
8080 static int hclge_get_regs_len(struct hnae3_handle *handle)
8081 {
8082         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8083         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8084         struct hclge_vport *vport = hclge_get_vport(handle);
8085         struct hclge_dev *hdev = vport->back;
8086         u32 regs_num_32_bit, regs_num_64_bit;
8087         int ret;
8088
8089         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8090         if (ret) {
8091                 dev_err(&hdev->pdev->dev,
8092                         "Get register number failed, ret = %d.\n", ret);
8093                 return -EOPNOTSUPP;
8094         }
8095
8096         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8097         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8098         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8099         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8100
8101         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8102                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8103                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8104 }
8105
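/* Fill the register dump: registers read directly from PCIe space come
 * first (each group padded with SEPARATOR_VALUE), followed by the 32 bit
 * and 64 bit registers queried from firmware.
 */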
8106 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8107                            void *data)
8108 {
8109         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8110         struct hclge_vport *vport = hclge_get_vport(handle);
8111         struct hclge_dev *hdev = vport->back;
8112         u32 regs_num_32_bit, regs_num_64_bit;
8113         int i, j, reg_um, separator_num;
8114         u32 *reg = data;
8115         int ret;
8116
8117         *version = hdev->fw_version;
8118
8119         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8120         if (ret) {
8121                 dev_err(&hdev->pdev->dev,
8122                         "Get register number failed, ret = %d.\n", ret);
8123                 return;
8124         }
8125
8126         /* fetching per-PF register values from PF PCIe register space */
8127         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8128         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8129         for (i = 0; i < reg_um; i++)
8130                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8131         for (i = 0; i < separator_num; i++)
8132                 *reg++ = SEPARATOR_VALUE;
8133
8134         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8135         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8136         for (i = 0; i < reg_um; i++)
8137                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8138         for (i = 0; i < separator_num; i++)
8139                 *reg++ = SEPARATOR_VALUE;
8140
8141         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8142         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8143         for (j = 0; j < kinfo->num_tqps; j++) {
8144                 for (i = 0; i < reg_um; i++)
8145                         *reg++ = hclge_read_dev(&hdev->hw,
8146                                                 ring_reg_addr_list[i] +
8147                                                 0x200 * j);
8148                 for (i = 0; i < separator_num; i++)
8149                         *reg++ = SEPARATOR_VALUE;
8150         }
8151
8152         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8153         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8154         for (j = 0; j < hdev->num_msi_used - 1; j++) {
8155                 for (i = 0; i < reg_um; i++)
8156                         *reg++ = hclge_read_dev(&hdev->hw,
8157                                                 tqp_intr_reg_addr_list[i] +
8158                                                 4 * j);
8159                 for (i = 0; i < separator_num; i++)
8160                         *reg++ = SEPARATOR_VALUE;
8161         }
8162
8163         /* fetching PF common register values from firmware */
8164         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8165         if (ret) {
8166                 dev_err(&hdev->pdev->dev,
8167                         "Get 32 bit register failed, ret = %d.\n", ret);
8168                 return;
8169         }
8170
8171         reg += regs_num_32_bit;
8172         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8173         if (ret)
8174                 dev_err(&hdev->pdev->dev,
8175                         "Get 64 bit register failed, ret = %d.\n", ret);
8176 }
8177
8178 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8179 {
8180         struct hclge_set_led_state_cmd *req;
8181         struct hclge_desc desc;
8182         int ret;
8183
8184         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8185
8186         req = (struct hclge_set_led_state_cmd *)desc.data;
8187         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8188                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8189
8190         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8191         if (ret)
8192                 dev_err(&hdev->pdev->dev,
8193                         "Send set led state cmd error, ret =%d\n", ret);
8194
8195         return ret;
8196 }
8197
8198 enum hclge_led_status {
8199         HCLGE_LED_OFF,
8200         HCLGE_LED_ON,
8201         HCLGE_LED_NO_CHANGE = 0xFF,
8202 };
8203
8204 static int hclge_set_led_id(struct hnae3_handle *handle,
8205                             enum ethtool_phys_id_state status)
8206 {
8207         struct hclge_vport *vport = hclge_get_vport(handle);
8208         struct hclge_dev *hdev = vport->back;
8209
8210         switch (status) {
8211         case ETHTOOL_ID_ACTIVE:
8212                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
8213         case ETHTOOL_ID_INACTIVE:
8214                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8215         default:
8216                 return -EINVAL;
8217         }
8218 }
8219
8220 static void hclge_get_link_mode(struct hnae3_handle *handle,
8221                                 unsigned long *supported,
8222                                 unsigned long *advertising)
8223 {
8224         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8225         struct hclge_vport *vport = hclge_get_vport(handle);
8226         struct hclge_dev *hdev = vport->back;
8227         unsigned int idx = 0;
8228
8229         for (; idx < size; idx++) {
8230                 supported[idx] = hdev->hw.mac.supported[idx];
8231                 advertising[idx] = hdev->hw.mac.advertising[idx];
8232         }
8233 }
8234
8235 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8236 {
8237         struct hclge_vport *vport = hclge_get_vport(handle);
8238         struct hclge_dev *hdev = vport->back;
8239
8240         return hclge_config_gro(hdev, enable);
8241 }
8242
8243 static const struct hnae3_ae_ops hclge_ops = {
8244         .init_ae_dev = hclge_init_ae_dev,
8245         .uninit_ae_dev = hclge_uninit_ae_dev,
8246         .flr_prepare = hclge_flr_prepare,
8247         .flr_done = hclge_flr_done,
8248         .init_client_instance = hclge_init_client_instance,
8249         .uninit_client_instance = hclge_uninit_client_instance,
8250         .map_ring_to_vector = hclge_map_ring_to_vector,
8251         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8252         .get_vector = hclge_get_vector,
8253         .put_vector = hclge_put_vector,
8254         .set_promisc_mode = hclge_set_promisc_mode,
8255         .set_loopback = hclge_set_loopback,
8256         .start = hclge_ae_start,
8257         .stop = hclge_ae_stop,
8258         .client_start = hclge_client_start,
8259         .client_stop = hclge_client_stop,
8260         .get_status = hclge_get_status,
8261         .get_ksettings_an_result = hclge_get_ksettings_an_result,
8262         .update_speed_duplex_h = hclge_update_speed_duplex_h,
8263         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8264         .get_media_type = hclge_get_media_type,
8265         .get_rss_key_size = hclge_get_rss_key_size,
8266         .get_rss_indir_size = hclge_get_rss_indir_size,
8267         .get_rss = hclge_get_rss,
8268         .set_rss = hclge_set_rss,
8269         .set_rss_tuple = hclge_set_rss_tuple,
8270         .get_rss_tuple = hclge_get_rss_tuple,
8271         .get_tc_size = hclge_get_tc_size,
8272         .get_mac_addr = hclge_get_mac_addr,
8273         .set_mac_addr = hclge_set_mac_addr,
8274         .do_ioctl = hclge_do_ioctl,
8275         .add_uc_addr = hclge_add_uc_addr,
8276         .rm_uc_addr = hclge_rm_uc_addr,
8277         .add_mc_addr = hclge_add_mc_addr,
8278         .rm_mc_addr = hclge_rm_mc_addr,
8279         .set_autoneg = hclge_set_autoneg,
8280         .get_autoneg = hclge_get_autoneg,
8281         .get_pauseparam = hclge_get_pauseparam,
8282         .set_pauseparam = hclge_set_pauseparam,
8283         .set_mtu = hclge_set_mtu,
8284         .reset_queue = hclge_reset_tqp,
8285         .get_stats = hclge_get_stats,
8286         .update_stats = hclge_update_stats,
8287         .get_strings = hclge_get_strings,
8288         .get_sset_count = hclge_get_sset_count,
8289         .get_fw_version = hclge_get_fw_version,
8290         .get_mdix_mode = hclge_get_mdix_mode,
8291         .enable_vlan_filter = hclge_enable_vlan_filter,
8292         .set_vlan_filter = hclge_set_vlan_filter,
8293         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8294         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8295         .reset_event = hclge_reset_event,
8296         .set_default_reset_request = hclge_set_def_reset_request,
8297         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8298         .set_channels = hclge_set_channels,
8299         .get_channels = hclge_get_channels,
8300         .get_regs_len = hclge_get_regs_len,
8301         .get_regs = hclge_get_regs,
8302         .set_led_id = hclge_set_led_id,
8303         .get_link_mode = hclge_get_link_mode,
8304         .add_fd_entry = hclge_add_fd_entry,
8305         .del_fd_entry = hclge_del_fd_entry,
8306         .del_all_fd_entries = hclge_del_all_fd_entries,
8307         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8308         .get_fd_rule_info = hclge_get_fd_rule_info,
8309         .get_fd_all_rules = hclge_get_all_rules,
8310         .restore_fd_rules = hclge_restore_fd_entries,
8311         .enable_fd = hclge_enable_fd,
8312         .dbg_run_cmd = hclge_dbg_run_cmd,
8313         .handle_hw_ras_error = hclge_handle_hw_ras_error,
8314         .get_hw_reset_stat = hclge_get_hw_reset_stat,
8315         .ae_dev_resetting = hclge_ae_dev_resetting,
8316         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8317         .set_gro_en = hclge_gro_en,
8318         .get_global_queue_id = hclge_covert_handle_qid_global,
8319         .set_timer_task = hclge_set_timer_task,
8320         .mac_connect_phy = hclge_mac_connect_phy,
8321         .mac_disconnect_phy = hclge_mac_disconnect_phy,
8322 };
8323
8324 static struct hnae3_ae_algo ae_algo = {
8325         .ops = &hclge_ops,
8326         .pdev_id_table = ae_algo_pci_tbl,
8327 };
8328
8329 static int hclge_init(void)
8330 {
8331         pr_info("%s is initializing\n", HCLGE_NAME);
8332
8333         hnae3_register_ae_algo(&ae_algo);
8334
8335         return 0;
8336 }
8337
8338 static void hclge_exit(void)
8339 {
8340         hnae3_unregister_ae_algo(&ae_algo);
8341 }
8342 module_init(hclge_init);
8343 module_exit(hclge_exit);
8344
8345 MODULE_LICENSE("GPL");
8346 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8347 MODULE_DESCRIPTION("HCLGE Driver");
8348 MODULE_VERSION(HCLGE_MOD_VERSION);