drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (linux.git blob, commit babec3b6ab16c8523efd843b9afbfe784e4cc446)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
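/* A minimal illustration (not part of the original file) of how these two
 * macros combine: the per-stat offsets stored in g_mac_stats_string[] below
 * are fed back into HCLGE_STATS_READ() by hclge_comm_get_stats(), e.g.
 *
 *   u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *                  HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * reads one u64 counter out of struct hclge_mac_stats by byte offset.
 */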
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
75 static const struct pci_device_id ae_algo_pci_tbl[] = {
76         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
77         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
83         /* required last entry */
84         {0, }
85 };
86
87 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
88
89 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
90                                          HCLGE_CMDQ_TX_ADDR_H_REG,
91                                          HCLGE_CMDQ_TX_DEPTH_REG,
92                                          HCLGE_CMDQ_TX_TAIL_REG,
93                                          HCLGE_CMDQ_TX_HEAD_REG,
94                                          HCLGE_CMDQ_RX_ADDR_L_REG,
95                                          HCLGE_CMDQ_RX_ADDR_H_REG,
96                                          HCLGE_CMDQ_RX_DEPTH_REG,
97                                          HCLGE_CMDQ_RX_TAIL_REG,
98                                          HCLGE_CMDQ_RX_HEAD_REG,
99                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
100                                          HCLGE_CMDQ_INTR_STS_REG,
101                                          HCLGE_CMDQ_INTR_EN_REG,
102                                          HCLGE_CMDQ_INTR_GEN_REG};
103
104 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
105                                            HCLGE_VECTOR0_OTER_EN_REG,
106                                            HCLGE_MISC_RESET_STS_REG,
107                                            HCLGE_MISC_VECTOR_INT_STS,
108                                            HCLGE_GLOBAL_RESET_REG,
109                                            HCLGE_FUN_RST_ING,
110                                            HCLGE_GRO_EN_REG};
111
112 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
113                                          HCLGE_RING_RX_ADDR_H_REG,
114                                          HCLGE_RING_RX_BD_NUM_REG,
115                                          HCLGE_RING_RX_BD_LENGTH_REG,
116                                          HCLGE_RING_RX_MERGE_EN_REG,
117                                          HCLGE_RING_RX_TAIL_REG,
118                                          HCLGE_RING_RX_HEAD_REG,
119                                          HCLGE_RING_RX_FBD_NUM_REG,
120                                          HCLGE_RING_RX_OFFSET_REG,
121                                          HCLGE_RING_RX_FBD_OFFSET_REG,
122                                          HCLGE_RING_RX_STASH_REG,
123                                          HCLGE_RING_RX_BD_ERR_REG,
124                                          HCLGE_RING_TX_ADDR_L_REG,
125                                          HCLGE_RING_TX_ADDR_H_REG,
126                                          HCLGE_RING_TX_BD_NUM_REG,
127                                          HCLGE_RING_TX_PRIORITY_REG,
128                                          HCLGE_RING_TX_TC_REG,
129                                          HCLGE_RING_TX_MERGE_EN_REG,
130                                          HCLGE_RING_TX_TAIL_REG,
131                                          HCLGE_RING_TX_HEAD_REG,
132                                          HCLGE_RING_TX_FBD_NUM_REG,
133                                          HCLGE_RING_TX_OFFSET_REG,
134                                          HCLGE_RING_TX_EBD_NUM_REG,
135                                          HCLGE_RING_TX_EBD_OFFSET_REG,
136                                          HCLGE_RING_TX_BD_ERR_REG,
137                                          HCLGE_RING_EN_REG};
138
139 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
140                                              HCLGE_TQP_INTR_GL0_REG,
141                                              HCLGE_TQP_INTR_GL1_REG,
142                                              HCLGE_TQP_INTR_GL2_REG,
143                                              HCLGE_TQP_INTR_RL_REG};
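/* The four register-address tables above only list addresses; presumably they
 * are walked by a register-dump path elsewhere in the driver, with the values
 * read from hardware at dump time.
 */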
144
145 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
146         "App    Loopback test",
147         "Serdes serial Loopback test",
148         "Serdes parallel Loopback test",
149         "Phy    Loopback test"
150 };
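/* entries above are looked up by the HNAE3_LOOP_* enum values, see the
 * ETH_SS_TEST handling in hclge_get_strings() below
 */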
151
152 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
153         {"mac_tx_mac_pause_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
155         {"mac_rx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
157         {"mac_tx_control_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
159         {"mac_rx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
161         {"mac_tx_pfc_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
163         {"mac_tx_pfc_pri0_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
165         {"mac_tx_pfc_pri1_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
167         {"mac_tx_pfc_pri2_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
169         {"mac_tx_pfc_pri3_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
171         {"mac_tx_pfc_pri4_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
173         {"mac_tx_pfc_pri5_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
175         {"mac_tx_pfc_pri6_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
177         {"mac_tx_pfc_pri7_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
179         {"mac_rx_pfc_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
181         {"mac_rx_pfc_pri0_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
183         {"mac_rx_pfc_pri1_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
185         {"mac_rx_pfc_pri2_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
187         {"mac_rx_pfc_pri3_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
189         {"mac_rx_pfc_pri4_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
191         {"mac_rx_pfc_pri5_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
193         {"mac_rx_pfc_pri6_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
195         {"mac_rx_pfc_pri7_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
197         {"mac_tx_total_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
199         {"mac_tx_total_oct_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
201         {"mac_tx_good_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
203         {"mac_tx_bad_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
205         {"mac_tx_good_oct_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
207         {"mac_tx_bad_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
209         {"mac_tx_uni_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
211         {"mac_tx_multi_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
213         {"mac_tx_broad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
215         {"mac_tx_undersize_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
217         {"mac_tx_oversize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
219         {"mac_tx_64_oct_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
221         {"mac_tx_65_127_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
223         {"mac_tx_128_255_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
225         {"mac_tx_256_511_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
227         {"mac_tx_512_1023_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
229         {"mac_tx_1024_1518_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
231         {"mac_tx_1519_2047_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
233         {"mac_tx_2048_4095_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
235         {"mac_tx_4096_8191_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
237         {"mac_tx_8192_9216_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
239         {"mac_tx_9217_12287_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
241         {"mac_tx_12288_16383_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
243         {"mac_tx_1519_max_good_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
245         {"mac_tx_1519_max_bad_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
247         {"mac_rx_total_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
249         {"mac_rx_total_oct_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
251         {"mac_rx_good_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
253         {"mac_rx_bad_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
255         {"mac_rx_good_oct_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
257         {"mac_rx_bad_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
259         {"mac_rx_uni_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
261         {"mac_rx_multi_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
263         {"mac_rx_broad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
265         {"mac_rx_undersize_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
267         {"mac_rx_oversize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
269         {"mac_rx_64_oct_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
271         {"mac_rx_65_127_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
273         {"mac_rx_128_255_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
275         {"mac_rx_256_511_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
277         {"mac_rx_512_1023_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
279         {"mac_rx_1024_1518_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
281         {"mac_rx_1519_2047_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
283         {"mac_rx_2048_4095_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
285         {"mac_rx_4096_8191_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
287         {"mac_rx_8192_9216_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
289         {"mac_rx_9217_12287_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
291         {"mac_rx_12288_16383_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
293         {"mac_rx_1519_max_good_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
295         {"mac_rx_1519_max_bad_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
297
298         {"mac_tx_fragment_pkt_num",
299                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
300         {"mac_tx_undermin_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
302         {"mac_tx_jabber_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
304         {"mac_tx_err_all_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
306         {"mac_tx_from_app_good_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
308         {"mac_tx_from_app_bad_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
310         {"mac_rx_fragment_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
312         {"mac_rx_undermin_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
314         {"mac_rx_jabber_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
316         {"mac_rx_fcs_err_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
318         {"mac_rx_send_app_good_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
320         {"mac_rx_send_app_bad_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
322 };
323
324 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
325         {
326                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
327                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
328                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
329                 .i_port_bitmap = 0x1,
330         },
331 };
332
333 static const u8 hclge_hash_key[] = {
334         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
335         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
336         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
337         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
338         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
339 };
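/* Default 40-byte RSS hash key; the byte sequence appears to be the
 * well-known default Toeplitz key from the Microsoft RSS specification.
 */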
340
341 static const u32 hclge_dfx_bd_offset_list[] = {
342         HCLGE_DFX_BIOS_BD_OFFSET,
343         HCLGE_DFX_SSU_0_BD_OFFSET,
344         HCLGE_DFX_SSU_1_BD_OFFSET,
345         HCLGE_DFX_IGU_BD_OFFSET,
346         HCLGE_DFX_RPU_0_BD_OFFSET,
347         HCLGE_DFX_RPU_1_BD_OFFSET,
348         HCLGE_DFX_NCSI_BD_OFFSET,
349         HCLGE_DFX_RTC_BD_OFFSET,
350         HCLGE_DFX_PPP_BD_OFFSET,
351         HCLGE_DFX_RCB_BD_OFFSET,
352         HCLGE_DFX_TQP_BD_OFFSET,
353         HCLGE_DFX_SSU_2_BD_OFFSET
354 };
355
356 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
357         HCLGE_OPC_DFX_BIOS_COMMON_REG,
358         HCLGE_OPC_DFX_SSU_REG_0,
359         HCLGE_OPC_DFX_SSU_REG_1,
360         HCLGE_OPC_DFX_IGU_EGU_REG,
361         HCLGE_OPC_DFX_RPU_REG_0,
362         HCLGE_OPC_DFX_RPU_REG_1,
363         HCLGE_OPC_DFX_NCSI_REG,
364         HCLGE_OPC_DFX_RTC_REG,
365         HCLGE_OPC_DFX_PPP_REG,
366         HCLGE_OPC_DFX_RCB_REG,
367         HCLGE_OPC_DFX_TQP_REG,
368         HCLGE_OPC_DFX_SSU_REG_2
369 };
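/* hclge_dfx_bd_offset_list[] and hclge_dfx_reg_opcode_list[] are kept in the
 * same order (BIOS, SSU_0, SSU_1, IGU, ...), presumably so one index can pair
 * a DFX BD-number offset with its matching query opcode.
 */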
370
371 static const struct key_info meta_data_key_info[] = {
372         { PACKET_TYPE_ID, 6},
373         { IP_FRAGEMENT, 1},
374         { ROCE_TYPE, 1},
375         { NEXT_KEY, 5},
376         { VLAN_NUMBER, 2},
377         { SRC_VPORT, 12},
378         { DST_VPORT, 12},
379         { TUNNEL_PACKET, 1},
380 };
381
382 static const struct key_info tuple_key_info[] = {
383         { OUTER_DST_MAC, 48},
384         { OUTER_SRC_MAC, 48},
385         { OUTER_VLAN_TAG_FST, 16},
386         { OUTER_VLAN_TAG_SEC, 16},
387         { OUTER_ETH_TYPE, 16},
388         { OUTER_L2_RSV, 16},
389         { OUTER_IP_TOS, 8},
390         { OUTER_IP_PROTO, 8},
391         { OUTER_SRC_IP, 32},
392         { OUTER_DST_IP, 32},
393         { OUTER_L3_RSV, 16},
394         { OUTER_SRC_PORT, 16},
395         { OUTER_DST_PORT, 16},
396         { OUTER_L4_RSV, 32},
397         { OUTER_TUN_VNI, 24},
398         { OUTER_TUN_FLOW_ID, 8},
399         { INNER_DST_MAC, 48},
400         { INNER_SRC_MAC, 48},
401         { INNER_VLAN_TAG_FST, 16},
402         { INNER_VLAN_TAG_SEC, 16},
403         { INNER_ETH_TYPE, 16},
404         { INNER_L2_RSV, 16},
405         { INNER_IP_TOS, 8},
406         { INNER_IP_PROTO, 8},
407         { INNER_SRC_IP, 32},
408         { INNER_DST_IP, 32},
409         { INNER_L3_RSV, 16},
410         { INNER_SRC_PORT, 16},
411         { INNER_DST_PORT, 16},
412         { INNER_L4_RSV, 32},
413 };
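/* Both key_info tables above are { field id, width } pairs, with widths in
 * bits (e.g. 48 for a MAC address, 32 for an IPv4 address); presumably they
 * must mirror the hardware flow-director key layout exactly.
 */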
414
415 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
416 {
417 #define HCLGE_MAC_CMD_NUM 21
418
419         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
420         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
421         __le64 *desc_data;
422         int i, k, n;
423         int ret;
424
425         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
426         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
427         if (ret) {
428                 dev_err(&hdev->pdev->dev,
429                         "Get MAC pkt stats fail, status = %d.\n", ret);
430
431                 return ret;
432         }
433
434         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
435                 /* for special opcode 0032, only the first desc has the head */
436                 if (unlikely(i == 0)) {
437                         desc_data = (__le64 *)(&desc[i].data[0]);
438                         n = HCLGE_RD_FIRST_STATS_NUM;
439                 } else {
440                         desc_data = (__le64 *)(&desc[i]);
441                         n = HCLGE_RD_OTHER_STATS_NUM;
442                 }
443
444                 for (k = 0; k < n; k++) {
445                         *data += le64_to_cpu(*desc_data);
446                         data++;
447                         desc_data++;
448                 }
449         }
450
451         return 0;
452 }
453
454 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
455 {
456         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
457         struct hclge_desc *desc;
458         __le64 *desc_data;
459         u16 i, k, n;
460         int ret;
461
462         /* This may be called inside atomic sections,
463          * so GFP_ATOMIC is more suitable here
464          */
465         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
466         if (!desc)
467                 return -ENOMEM;
468
469         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
470         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
471         if (ret) {
472                 kfree(desc);
473                 return ret;
474         }
475
476         for (i = 0; i < desc_num; i++) {
477                 /* for special opcode 0034, only the first desc has the head */
478                 if (i == 0) {
479                         desc_data = (__le64 *)(&desc[i].data[0]);
480                         n = HCLGE_RD_FIRST_STATS_NUM;
481                 } else {
482                         desc_data = (__le64 *)(&desc[i]);
483                         n = HCLGE_RD_OTHER_STATS_NUM;
484                 }
485
486                 for (k = 0; k < n; k++) {
487                         *data += le64_to_cpu(*desc_data);
488                         data++;
489                         desc_data++;
490                 }
491         }
492
493         kfree(desc);
494
495         return 0;
496 }
497
498 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
499 {
500         struct hclge_desc desc;
501         __le32 *desc_data;
502         u32 reg_num;
503         int ret;
504
505         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
506         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
507         if (ret)
508                 return ret;
509
510         desc_data = (__le32 *)(&desc.data[0]);
511         reg_num = le32_to_cpu(*desc_data);
512
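        /* The expression below is equivalent to
         * 1 + DIV_ROUND_UP(reg_num - 3, 4) for reg_num >= 3; the constants 3
         * and 4 presumably reflect how many stats registers fit in the first
         * and in each following descriptor.
         */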
513         *desc_num = 1 + ((reg_num - 3) >> 2) +
514                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
515
516         return 0;
517 }
518
519 static int hclge_mac_update_stats(struct hclge_dev *hdev)
520 {
521         u32 desc_num;
522         int ret;
523
524         ret = hclge_mac_query_reg_num(hdev, &desc_num);
525
526         /* if the query succeeds, the firmware supports the new statistics acquisition method */
527         if (!ret)
528                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
529         else if (ret == -EOPNOTSUPP)
530                 ret = hclge_mac_update_stats_defective(hdev);
531         else
532                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
533
534         return ret;
535 }
536
537 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
538 {
539         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
540         struct hclge_vport *vport = hclge_get_vport(handle);
541         struct hclge_dev *hdev = vport->back;
542         struct hnae3_queue *queue;
543         struct hclge_desc desc[1];
544         struct hclge_tqp *tqp;
545         int ret, i;
546
547         for (i = 0; i < kinfo->num_tqps; i++) {
548                 queue = handle->kinfo.tqp[i];
549                 tqp = container_of(queue, struct hclge_tqp, q);
550                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
551                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
552                                            true);
553
554                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
555                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
556                 if (ret) {
557                         dev_err(&hdev->pdev->dev,
558                                 "Query tqp stat fail, status = %d, queue = %d\n",
559                                 ret, i);
560                         return ret;
561                 }
562                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
563                         le32_to_cpu(desc[0].data[1]);
564         }
565
566         for (i = 0; i < kinfo->num_tqps; i++) {
567                 queue = handle->kinfo.tqp[i];
568                 tqp = container_of(queue, struct hclge_tqp, q);
569                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
570                 hclge_cmd_setup_basic_desc(&desc[0],
571                                            HCLGE_OPC_QUERY_TX_STATUS,
572                                            true);
573
574                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
575                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
576                 if (ret) {
577                         dev_err(&hdev->pdev->dev,
578                                 "Query tqp stat fail, status = %d, queue = %d\n",
579                                 ret, i);
580                         return ret;
581                 }
582                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
583                         le32_to_cpu(desc[0].data[1]);
584         }
585
586         return 0;
587 }
588
589 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
590 {
591         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
592         struct hclge_tqp *tqp;
593         u64 *buff = data;
594         int i;
595
596         for (i = 0; i < kinfo->num_tqps; i++) {
597                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
598                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
599         }
600
601         for (i = 0; i < kinfo->num_tqps; i++) {
602                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
604         }
605
606         return buff;
607 }
608
609 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
612
613         /* each tqp has one TX queue and one RX queue */
614         return kinfo->num_tqps * (2);
615 }
616
617 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
618 {
619         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
620         u8 *buff = data;
621         int i = 0;
622
623         for (i = 0; i < kinfo->num_tqps; i++) {
624                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
625                         struct hclge_tqp, q);
626                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
627                          tqp->index);
628                 buff = buff + ETH_GSTRING_LEN;
629         }
630
631         for (i = 0; i < kinfo->num_tqps; i++) {
632                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
633                         struct hclge_tqp, q);
634                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
635                          tqp->index);
636                 buff = buff + ETH_GSTRING_LEN;
637         }
638
639         return buff;
640 }
641
642 static u64 *hclge_comm_get_stats(const void *comm_stats,
643                                  const struct hclge_comm_stats_str strs[],
644                                  int size, u64 *data)
645 {
646         u64 *buf = data;
647         u32 i;
648
649         for (i = 0; i < size; i++)
650                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
651
652         return buf + size;
653 }
654
655 static u8 *hclge_comm_get_strings(u32 stringset,
656                                   const struct hclge_comm_stats_str strs[],
657                                   int size, u8 *data)
658 {
659         char *buff = (char *)data;
660         u32 i;
661
662         if (stringset != ETH_SS_STATS)
663                 return buff;
664
665         for (i = 0; i < size; i++) {
666                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
667                 buff = buff + ETH_GSTRING_LEN;
668         }
669
670         return (u8 *)buff;
671 }
672
673 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
674 {
675         struct hnae3_handle *handle;
676         int status;
677
678         handle = &hdev->vport[0].nic;
679         if (handle->client) {
680                 status = hclge_tqps_update_stats(handle);
681                 if (status) {
682                         dev_err(&hdev->pdev->dev,
683                                 "Update TQPS stats fail, status = %d.\n",
684                                 status);
685                 }
686         }
687
688         status = hclge_mac_update_stats(hdev);
689         if (status)
690                 dev_err(&hdev->pdev->dev,
691                         "Update MAC stats fail, status = %d.\n", status);
692 }
693
694 static void hclge_update_stats(struct hnae3_handle *handle,
695                                struct net_device_stats *net_stats)
696 {
697         struct hclge_vport *vport = hclge_get_vport(handle);
698         struct hclge_dev *hdev = vport->back;
699         int status;
700
701         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
702                 return;
703
704         status = hclge_mac_update_stats(hdev);
705         if (status)
706                 dev_err(&hdev->pdev->dev,
707                         "Update MAC stats fail, status = %d.\n",
708                         status);
709
710         status = hclge_tqps_update_stats(handle);
711         if (status)
712                 dev_err(&hdev->pdev->dev,
713                         "Update TQPS stats fail, status = %d.\n",
714                         status);
715
716         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
717 }
718
719 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
720 {
721 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
722                 HNAE3_SUPPORT_PHY_LOOPBACK |\
723                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
724                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
725
726         struct hclge_vport *vport = hclge_get_vport(handle);
727         struct hclge_dev *hdev = vport->back;
728         int count = 0;
729
730         /* Loopback test support rules:
731          * mac: only supported in GE mode
732          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
733          * phy: only supported when a phy device exists on the board
734          */
735         if (stringset == ETH_SS_TEST) {
736                 /* clear loopback bit flags first */
737                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
738                 if (hdev->pdev->revision >= 0x21 ||
739                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
740                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
742                         count += 1;
743                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
744                 }
745
746                 count += 2;
747                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
748                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
749
750                 if (hdev->hw.mac.phydev) {
751                         count += 1;
752                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
753                 }
754
755         } else if (stringset == ETH_SS_STATS) {
756                 count = ARRAY_SIZE(g_mac_stats_string) +
757                         hclge_tqps_get_sset_count(handle, stringset);
758         }
759
760         return count;
761 }
762
763 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
764                               u8 *data)
765 {
766         u8 *p = (char *)data;
767         int size;
768
769         if (stringset == ETH_SS_STATS) {
770                 size = ARRAY_SIZE(g_mac_stats_string);
771                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
772                                            size, p);
773                 p = hclge_tqps_get_strings(handle, p);
774         } else if (stringset == ETH_SS_TEST) {
775                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
776                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
777                                ETH_GSTRING_LEN);
778                         p += ETH_GSTRING_LEN;
779                 }
780                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
781                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
782                                ETH_GSTRING_LEN);
783                         p += ETH_GSTRING_LEN;
784                 }
785                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
786                         memcpy(p,
787                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
788                                ETH_GSTRING_LEN);
789                         p += ETH_GSTRING_LEN;
790                 }
791                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
792                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
793                                ETH_GSTRING_LEN);
794                         p += ETH_GSTRING_LEN;
795                 }
796         }
797 }
798
799 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
800 {
801         struct hclge_vport *vport = hclge_get_vport(handle);
802         struct hclge_dev *hdev = vport->back;
803         u64 *p;
804
805         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
806                                  ARRAY_SIZE(g_mac_stats_string), data);
807         p = hclge_tqps_get_stats(handle, p);
808 }
809
810 static void hclge_get_mac_stat(struct hnae3_handle *handle,
811                                struct hns3_mac_stats *mac_stats)
812 {
813         struct hclge_vport *vport = hclge_get_vport(handle);
814         struct hclge_dev *hdev = vport->back;
815
816         hclge_update_stats(handle, NULL);
817
818         mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
819         mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
820 }
821
822 static int hclge_parse_func_status(struct hclge_dev *hdev,
823                                    struct hclge_func_status_cmd *status)
824 {
825         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
826                 return -EINVAL;
827
828         /* Set the pf to main pf */
829         if (status->pf_state & HCLGE_PF_STATE_MAIN)
830                 hdev->flag |= HCLGE_FLAG_MAIN;
831         else
832                 hdev->flag &= ~HCLGE_FLAG_MAIN;
833
834         return 0;
835 }
836
837 static int hclge_query_function_status(struct hclge_dev *hdev)
838 {
839 #define HCLGE_QUERY_MAX_CNT     5
840
841         struct hclge_func_status_cmd *req;
842         struct hclge_desc desc;
843         int timeout = 0;
844         int ret;
845
846         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
847         req = (struct hclge_func_status_cmd *)desc.data;
848
849         do {
850                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
851                 if (ret) {
852                         dev_err(&hdev->pdev->dev,
853                                 "query function status failed %d.\n", ret);
854                         return ret;
855                 }
856
857                 /* Check pf reset is done */
858                 if (req->pf_state)
859                         break;
860                 usleep_range(1000, 2000);
861         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
862
863         ret = hclge_parse_func_status(hdev, req);
864
865         return ret;
866 }
867
868 static int hclge_query_pf_resource(struct hclge_dev *hdev)
869 {
870         struct hclge_pf_res_cmd *req;
871         struct hclge_desc desc;
872         int ret;
873
874         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
875         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
876         if (ret) {
877                 dev_err(&hdev->pdev->dev,
878                         "query pf resource failed %d.\n", ret);
879                 return ret;
880         }
881
882         req = (struct hclge_pf_res_cmd *)desc.data;
883         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
884         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
885
886         if (req->tx_buf_size)
887                 hdev->tx_buf_size =
888                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
889         else
890                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
891
892         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
893
894         if (req->dv_buf_size)
895                 hdev->dv_buf_size =
896                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
897         else
898                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
899
900         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902         if (hnae3_dev_roce_supported(hdev)) {
903                 hdev->roce_base_msix_offset =
904                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
905                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
906                 hdev->num_roce_msi =
907                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
908                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
909
910                 /* the nic's msix number always equals the roce's. */
911                 hdev->num_nic_msi = hdev->num_roce_msi;
912
913                 /* PF should have both NIC vectors and Roce vectors;
914                  * NIC vectors are queued before Roce vectors.
915                  */
916                 hdev->num_msi = hdev->num_roce_msi +
917                                 hdev->roce_base_msix_offset;
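                /* resulting MSI-X layout in this branch:
                 *   vectors [0, roce_base_msix_offset)       -> NIC
                 *   vectors [roce_base_msix_offset, num_msi) -> RoCE
                 */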
918         } else {
919                 hdev->num_msi =
920                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
921                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
922
923                 hdev->num_nic_msi = hdev->num_msi;
924         }
925
926         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
927                 dev_err(&hdev->pdev->dev,
928                         "Just %u msi resources, not enough for pf(min:2).\n",
929                         hdev->num_nic_msi);
930                 return -EINVAL;
931         }
932
933         return 0;
934 }
935
936 static int hclge_parse_speed(int speed_cmd, int *speed)
937 {
938         switch (speed_cmd) {
939         case 6:
940                 *speed = HCLGE_MAC_SPEED_10M;
941                 break;
942         case 7:
943                 *speed = HCLGE_MAC_SPEED_100M;
944                 break;
945         case 0:
946                 *speed = HCLGE_MAC_SPEED_1G;
947                 break;
948         case 1:
949                 *speed = HCLGE_MAC_SPEED_10G;
950                 break;
951         case 2:
952                 *speed = HCLGE_MAC_SPEED_25G;
953                 break;
954         case 3:
955                 *speed = HCLGE_MAC_SPEED_40G;
956                 break;
957         case 4:
958                 *speed = HCLGE_MAC_SPEED_50G;
959                 break;
960         case 5:
961                 *speed = HCLGE_MAC_SPEED_100G;
962                 break;
963         default:
964                 return -EINVAL;
965         }
966
967         return 0;
968 }
969
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972         struct hclge_vport *vport = hclge_get_vport(handle);
973         struct hclge_dev *hdev = vport->back;
974         u32 speed_ability = hdev->hw.mac.speed_ability;
975         u32 speed_bit = 0;
976
977         switch (speed) {
978         case HCLGE_MAC_SPEED_10M:
979                 speed_bit = HCLGE_SUPPORT_10M_BIT;
980                 break;
981         case HCLGE_MAC_SPEED_100M:
982                 speed_bit = HCLGE_SUPPORT_100M_BIT;
983                 break;
984         case HCLGE_MAC_SPEED_1G:
985                 speed_bit = HCLGE_SUPPORT_1G_BIT;
986                 break;
987         case HCLGE_MAC_SPEED_10G:
988                 speed_bit = HCLGE_SUPPORT_10G_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991                 speed_bit = HCLGE_SUPPORT_25G_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_40G:
994                 speed_bit = HCLGE_SUPPORT_40G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_50G:
997                 speed_bit = HCLGE_SUPPORT_50G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_100G:
1000                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001                 break;
1002         default:
1003                 return -EINVAL;
1004         }
1005
1006         if (speed_bit & speed_ability)
1007                 return 0;
1008
1009         return -EINVAL;
1010 }
1011
1012 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1013 {
1014         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1015                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1016                                  mac->supported);
1017         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1022                                  mac->supported);
1023         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1024                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1025                                  mac->supported);
1026         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1028                                  mac->supported);
1029 }
1030
1031 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1032 {
1033         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1034                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1035                                  mac->supported);
1036         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1037                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1038                                  mac->supported);
1039         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1041                                  mac->supported);
1042         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1047                                  mac->supported);
1048 }
1049
1050 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1051 {
1052         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1054                                  mac->supported);
1055         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1057                                  mac->supported);
1058         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1060                                  mac->supported);
1061         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1063                                  mac->supported);
1064         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1066                                  mac->supported);
1067 }
1068
1069 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1070 {
1071         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1079                                  mac->supported);
1080         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1082                                  mac->supported);
1083         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1085                                  mac->supported);
1086         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1088                                  mac->supported);
1089 }
1090
1091 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1092 {
1093         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1094         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1095
1096         switch (mac->speed) {
1097         case HCLGE_MAC_SPEED_10G:
1098         case HCLGE_MAC_SPEED_40G:
1099                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1100                                  mac->supported);
1101                 mac->fec_ability =
1102                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1103                 break;
1104         case HCLGE_MAC_SPEED_25G:
1105         case HCLGE_MAC_SPEED_50G:
1106                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1107                                  mac->supported);
1108                 mac->fec_ability =
1109                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110                         BIT(HNAE3_FEC_AUTO);
1111                 break;
1112         case HCLGE_MAC_SPEED_100G:
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1114                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1115                 break;
1116         default:
1117                 mac->fec_ability = 0;
1118                 break;
1119         }
1120 }
1121
1122 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1123                                         u8 speed_ability)
1124 {
1125         struct hclge_mac *mac = &hdev->hw.mac;
1126
1127         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1129                                  mac->supported);
1130
1131         hclge_convert_setting_sr(mac, speed_ability);
1132         hclge_convert_setting_lr(mac, speed_ability);
1133         hclge_convert_setting_cr(mac, speed_ability);
1134         if (hdev->pdev->revision >= 0x21)
1135                 hclge_convert_setting_fec(mac);
1136
1137         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1138         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1140 }
1141
1142 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1143                                             u8 speed_ability)
1144 {
1145         struct hclge_mac *mac = &hdev->hw.mac;
1146
1147         hclge_convert_setting_kr(mac, speed_ability);
1148         if (hdev->pdev->revision >= 0x21)
1149                 hclge_convert_setting_fec(mac);
1150         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1151         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1153 }
1154
1155 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1156                                          u8 speed_ability)
1157 {
1158         unsigned long *supported = hdev->hw.mac.supported;
1159
1160         /* default to supporting all speeds for a GE port */
1161         if (!speed_ability)
1162                 speed_ability = HCLGE_SUPPORT_GE;
1163
1164         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1165                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1166                                  supported);
1167
1168         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1169                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1170                                  supported);
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1172                                  supported);
1173         }
1174
1175         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1176                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1177                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1178         }
1179
1180         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1181         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1184 }
1185
1186 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1187 {
1188         u8 media_type = hdev->hw.mac.media_type;
1189
1190         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1191                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1192         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1193                 hclge_parse_copper_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1195                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1196 }
1197
1198 static u32 hclge_get_max_speed(u8 speed_ability)
1199 {
1200         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1201                 return HCLGE_MAC_SPEED_100G;
1202
1203         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1204                 return HCLGE_MAC_SPEED_50G;
1205
1206         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1207                 return HCLGE_MAC_SPEED_40G;
1208
1209         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1210                 return HCLGE_MAC_SPEED_25G;
1211
1212         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1213                 return HCLGE_MAC_SPEED_10G;
1214
1215         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1216                 return HCLGE_MAC_SPEED_1G;
1217
1218         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1219                 return HCLGE_MAC_SPEED_100M;
1220
1221         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1222                 return HCLGE_MAC_SPEED_10M;
1223
1224         return HCLGE_MAC_SPEED_1G;
1225 }
1226
1227 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1228 {
1229         struct hclge_cfg_param_cmd *req;
1230         u64 mac_addr_tmp_high;
1231         u64 mac_addr_tmp;
1232         unsigned int i;
1233
1234         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1235
1236         /* get the configuration */
1237         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1238                                               HCLGE_CFG_VMDQ_M,
1239                                               HCLGE_CFG_VMDQ_S);
1240         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1242         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                             HCLGE_CFG_TQP_DESC_N_M,
1244                                             HCLGE_CFG_TQP_DESC_N_S);
1245
1246         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247                                         HCLGE_CFG_PHY_ADDR_M,
1248                                         HCLGE_CFG_PHY_ADDR_S);
1249         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250                                           HCLGE_CFG_MEDIA_TP_M,
1251                                           HCLGE_CFG_MEDIA_TP_S);
1252         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253                                           HCLGE_CFG_RX_BUF_LEN_M,
1254                                           HCLGE_CFG_RX_BUF_LEN_S);
1255         /* get mac_address */
1256         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1257         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1258                                             HCLGE_CFG_MAC_ADDR_H_M,
1259                                             HCLGE_CFG_MAC_ADDR_H_S);
1260
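        /* The split shift below is equivalent to (mac_addr_tmp_high << 32): it
         * places the high bits of the 6-byte MAC address above the 32 low bits
         * already held in mac_addr_tmp (splitting the shift is presumably just
         * a way to avoid a shift-by-32 construct).
         */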
1261         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1262
1263         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1264                                              HCLGE_CFG_DEFAULT_SPEED_M,
1265                                              HCLGE_CFG_DEFAULT_SPEED_S);
1266         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267                                             HCLGE_CFG_RSS_SIZE_M,
1268                                             HCLGE_CFG_RSS_SIZE_S);
1269
1270         for (i = 0; i < ETH_ALEN; i++)
1271                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1272
1273         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1274         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1275
1276         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277                                              HCLGE_CFG_SPEED_ABILITY_M,
1278                                              HCLGE_CFG_SPEED_ABILITY_S);
1279         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1281                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1282         if (!cfg->umv_space)
1283                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1284 }
1285
1286 /* hclge_get_cfg: query the static parameters from flash
1287  * @hdev: pointer to struct hclge_dev
1288  * @hcfg: the config structure to be filled in
1289  */
1290 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1291 {
1292         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1293         struct hclge_cfg_param_cmd *req;
1294         unsigned int i;
1295         int ret;
1296
1297         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1298                 u32 offset = 0;
1299
1300                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1301                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1302                                            true);
1303                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1304                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1305                 /* Len should be in units of 4 bytes when sent to hardware */
1306                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1307                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1308                 req->offset = cpu_to_le32(offset);
1309         }
1310
1311         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1312         if (ret) {
1313                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1314                 return ret;
1315         }
1316
1317         hclge_parse_cfg(hcfg, desc);
1318
1319         return 0;
1320 }
1321
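     /* query the function status and the PF's hardware resources from
      * the firmware
      */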
1322 static int hclge_get_cap(struct hclge_dev *hdev)
1323 {
1324         int ret;
1325
1326         ret = hclge_query_function_status(hdev);
1327         if (ret) {
1328                 dev_err(&hdev->pdev->dev,
1329                         "query function status error %d.\n", ret);
1330                 return ret;
1331         }
1332
1333         /* get pf resource */
1334         ret = hclge_query_pf_resource(hdev);
1335         if (ret)
1336                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1337
1338         return ret;
1339 }
1340
1341 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1342 {
1343 #define HCLGE_MIN_TX_DESC       64
1344 #define HCLGE_MIN_RX_DESC       64
1345
1346         if (!is_kdump_kernel())
1347                 return;
1348
1349         dev_info(&hdev->pdev->dev,
1350                  "Running kdump kernel. Using minimal resources\n");
1351
1352         /* the minimum number of queue pairs equals the number of vports */
1353         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1354         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1355         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1356 }
1357
1358 static int hclge_configure(struct hclge_dev *hdev)
1359 {
1360         struct hclge_cfg cfg;
1361         unsigned int i;
1362         int ret;
1363
1364         ret = hclge_get_cfg(hdev, &cfg);
1365         if (ret) {
1366                 dev_err(&hdev->pdev->dev, "get device config failed %d.\n", ret);
1367                 return ret;
1368         }
1369
1370         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1371         hdev->base_tqp_pid = 0;
1372         hdev->rss_size_max = cfg.rss_size_max;
1373         hdev->rx_buf_len = cfg.rx_buf_len;
1374         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1375         hdev->hw.mac.media_type = cfg.media_type;
1376         hdev->hw.mac.phy_addr = cfg.phy_addr;
1377         hdev->num_tx_desc = cfg.tqp_desc_num;
1378         hdev->num_rx_desc = cfg.tqp_desc_num;
1379         hdev->tm_info.num_pg = 1;
1380         hdev->tc_max = cfg.tc_num;
1381         hdev->tm_info.hw_pfc_map = 0;
1382         hdev->wanted_umv_size = cfg.umv_space;
1383
1384         if (hnae3_dev_fd_supported(hdev)) {
1385                 hdev->fd_en = true;
1386                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1387         }
1388
1389         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1390         if (ret) {
1391                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1392                 return ret;
1393         }
1394
1395         hclge_parse_link_mode(hdev, cfg.speed_ability);
1396
1397         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1398
1399         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1400             (hdev->tc_max < 1)) {
1401                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1402                          hdev->tc_max);
1403                 hdev->tc_max = 1;
1404         }
1405
1406         /* Dev does not support DCB */
1407         if (!hnae3_dev_dcb_supported(hdev)) {
1408                 hdev->tc_max = 1;
1409                 hdev->pfc_max = 0;
1410         } else {
1411                 hdev->pfc_max = hdev->tc_max;
1412         }
1413
1414         hdev->tm_info.num_tc = 1;
1415
1416         /* non-contiguous TCs are currently not supported */
1417         for (i = 0; i < hdev->tm_info.num_tc; i++)
1418                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1419
1420         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1421
1422         hclge_init_kdump_kernel_config(hdev);
1423
1424         /* Set the init affinity based on pci func number */
1425         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1426         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1427         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1428                         &hdev->affinity_mask);
1429
1430         return ret;
1431 }
1432
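     /* program the minimum and maximum TSO MSS supported by the hardware */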
1433 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1434                             unsigned int tso_mss_max)
1435 {
1436         struct hclge_cfg_tso_status_cmd *req;
1437         struct hclge_desc desc;
1438         u16 tso_mss;
1439
1440         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1441
1442         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1443
1444         tso_mss = 0;
1445         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1446                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1447         req->tso_mss_min = cpu_to_le16(tso_mss);
1448
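             /* the MIN mask/shift is reused below for the max value;
              * tso_mss_min and tso_mss_max are separate 16-bit fields, so the
              * in-field layout appears to be identical
              */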
1449         tso_mss = 0;
1450         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1451                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1452         req->tso_mss_max = cpu_to_le16(tso_mss);
1453
1454         return hclge_cmd_send(&hdev->hw, &desc, 1);
1455 }
1456
1457 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1458 {
1459         struct hclge_cfg_gro_status_cmd *req;
1460         struct hclge_desc desc;
1461         int ret;
1462
1463         if (!hnae3_dev_gro_supported(hdev))
1464                 return 0;
1465
1466         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1467         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1468
1469         req->gro_en = cpu_to_le16(en ? 1 : 0);
1470
1471         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1472         if (ret)
1473                 dev_err(&hdev->pdev->dev,
1474                         "GRO hardware config cmd failed, ret = %d\n", ret);
1475
1476         return ret;
1477 }
1478
1479 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1480 {
1481         struct hclge_tqp *tqp;
1482         int i;
1483
1484         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1485                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1486         if (!hdev->htqp)
1487                 return -ENOMEM;
1488
1489         tqp = hdev->htqp;
1490
1491         for (i = 0; i < hdev->num_tqps; i++) {
1492                 tqp->dev = &hdev->pdev->dev;
1493                 tqp->index = i;
1494
1495                 tqp->q.ae_algo = &ae_algo;
1496                 tqp->q.buf_size = hdev->rx_buf_len;
1497                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1498                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1499                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1500                         i * HCLGE_TQP_REG_SIZE;
1501
1502                 tqp++;
1503         }
1504
1505         return 0;
1506 }
1507
1508 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1509                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1510 {
1511         struct hclge_tqp_map_cmd *req;
1512         struct hclge_desc desc;
1513         int ret;
1514
1515         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1516
1517         req = (struct hclge_tqp_map_cmd *)desc.data;
1518         req->tqp_id = cpu_to_le16(tqp_pid);
1519         req->tqp_vf = func_id;
1520         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1521         if (!is_pf)
1522                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1523         req->tqp_vid = cpu_to_le16(tqp_vid);
1524
1525         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1526         if (ret)
1527                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1528
1529         return ret;
1530 }
1531
1532 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1533 {
1534         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1535         struct hclge_dev *hdev = vport->back;
1536         int i, alloced;
1537
1538         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1539              alloced < num_tqps; i++) {
1540                 if (!hdev->htqp[i].alloced) {
1541                         hdev->htqp[i].q.handle = &vport->nic;
1542                         hdev->htqp[i].q.tqp_index = alloced;
1543                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1544                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1545                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1546                         hdev->htqp[i].alloced = true;
1547                         alloced++;
1548                 }
1549         }
1550         vport->alloc_tqps = alloced;
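             /* rss_size is bounded by the hardware maximum and by the number
              * of TQPs available per TC
              */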
1551         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1552                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1553
1554         /* ensure a one-to-one mapping between irq and queue by default */
1555         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1556                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1557
1558         return 0;
1559 }
1560
1561 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1562                             u16 num_tx_desc, u16 num_rx_desc)
1563
1564 {
1565         struct hnae3_handle *nic = &vport->nic;
1566         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1567         struct hclge_dev *hdev = vport->back;
1568         int ret;
1569
1570         kinfo->num_tx_desc = num_tx_desc;
1571         kinfo->num_rx_desc = num_rx_desc;
1572
1573         kinfo->rx_buf_len = hdev->rx_buf_len;
1574
1575         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1576                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1577         if (!kinfo->tqp)
1578                 return -ENOMEM;
1579
1580         ret = hclge_assign_tqp(vport, num_tqps);
1581         if (ret)
1582                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1583
1584         return ret;
1585 }
1586
1587 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1588                                   struct hclge_vport *vport)
1589 {
1590         struct hnae3_handle *nic = &vport->nic;
1591         struct hnae3_knic_private_info *kinfo;
1592         u16 i;
1593
1594         kinfo = &nic->kinfo;
1595         for (i = 0; i < vport->alloc_tqps; i++) {
1596                 struct hclge_tqp *q =
1597                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1598                 bool is_pf;
1599                 int ret;
1600
1601                 is_pf = !(vport->vport_id);
1602                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1603                                              i, is_pf);
1604                 if (ret)
1605                         return ret;
1606         }
1607
1608         return 0;
1609 }
1610
1611 static int hclge_map_tqp(struct hclge_dev *hdev)
1612 {
1613         struct hclge_vport *vport = hdev->vport;
1614         u16 i, num_vport;
1615
1616         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1617         for (i = 0; i < num_vport; i++) {
1618                 int ret;
1619
1620                 ret = hclge_map_tqp_to_vport(hdev, vport);
1621                 if (ret)
1622                         return ret;
1623
1624                 vport++;
1625         }
1626
1627         return 0;
1628 }
1629
1630 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1631 {
1632         struct hnae3_handle *nic = &vport->nic;
1633         struct hclge_dev *hdev = vport->back;
1634         int ret;
1635
1636         nic->pdev = hdev->pdev;
1637         nic->ae_algo = &ae_algo;
1638         nic->numa_node_mask = hdev->numa_node_mask;
1639
1640         ret = hclge_knic_setup(vport, num_tqps,
1641                                hdev->num_tx_desc, hdev->num_rx_desc);
1642         if (ret)
1643                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1644
1645         return ret;
1646 }
1647
1648 static int hclge_alloc_vport(struct hclge_dev *hdev)
1649 {
1650         struct pci_dev *pdev = hdev->pdev;
1651         struct hclge_vport *vport;
1652         u32 tqp_main_vport;
1653         u32 tqp_per_vport;
1654         int num_vport, i;
1655         int ret;
1656
1657         /* We need to alloc a vport for the main NIC of the PF */
1658         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1659
1660         if (hdev->num_tqps < num_vport) {
1661                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1662                         hdev->num_tqps, num_vport);
1663                 return -EINVAL;
1664         }
1665
1666         /* Alloc the same number of TQPs for every vport */
1667         tqp_per_vport = hdev->num_tqps / num_vport;
1668         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1669
1670         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1671                              GFP_KERNEL);
1672         if (!vport)
1673                 return -ENOMEM;
1674
1675         hdev->vport = vport;
1676         hdev->num_alloc_vport = num_vport;
1677
1678         if (IS_ENABLED(CONFIG_PCI_IOV))
1679                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1680
1681         for (i = 0; i < num_vport; i++) {
1682                 vport->back = hdev;
1683                 vport->vport_id = i;
1684                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1685                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1686                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1687                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1688                 INIT_LIST_HEAD(&vport->vlan_list);
1689                 INIT_LIST_HEAD(&vport->uc_mac_list);
1690                 INIT_LIST_HEAD(&vport->mc_mac_list);
1691
1692                 if (i == 0)
1693                         ret = hclge_vport_setup(vport, tqp_main_vport);
1694                 else
1695                         ret = hclge_vport_setup(vport, tqp_per_vport);
1696                 if (ret) {
1697                         dev_err(&pdev->dev,
1698                                 "vport setup failed for vport %d, %d\n",
1699                                 i, ret);
1700                         return ret;
1701                 }
1702
1703                 vport++;
1704         }
1705
1706         return 0;
1707 }
1708
1709 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1710                                     struct hclge_pkt_buf_alloc *buf_alloc)
1711 {
1712 /* TX buffer size is in units of 128 bytes */
1713 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1714 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1715         struct hclge_tx_buff_alloc_cmd *req;
1716         struct hclge_desc desc;
1717         int ret;
1718         u8 i;
1719
1720         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1721
1722         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1723         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1724                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1725
1726                 req->tx_pkt_buff[i] =
1727                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1728                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1729         }
1730
1731         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1732         if (ret)
1733                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1734                         ret);
1735
1736         return ret;
1737 }
1738
1739 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1740                                  struct hclge_pkt_buf_alloc *buf_alloc)
1741 {
1742         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1743
1744         if (ret)
1745                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1746
1747         return ret;
1748 }
1749
1750 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1751 {
1752         unsigned int i;
1753         u32 cnt = 0;
1754
1755         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1756                 if (hdev->hw_tc_map & BIT(i))
1757                         cnt++;
1758         return cnt;
1759 }
1760
1761 /* Get the number of PFC-enabled TCs that have a private buffer */
1762 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1763                                   struct hclge_pkt_buf_alloc *buf_alloc)
1764 {
1765         struct hclge_priv_buf *priv;
1766         unsigned int i;
1767         int cnt = 0;
1768
1769         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1770                 priv = &buf_alloc->priv_buf[i];
1771                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1772                     priv->enable)
1773                         cnt++;
1774         }
1775
1776         return cnt;
1777 }
1778
1779 /* Get the number of PFC-disabled TCs that have a private buffer */
1780 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1781                                      struct hclge_pkt_buf_alloc *buf_alloc)
1782 {
1783         struct hclge_priv_buf *priv;
1784         unsigned int i;
1785         int cnt = 0;
1786
1787         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1788                 priv = &buf_alloc->priv_buf[i];
1789                 if (hdev->hw_tc_map & BIT(i) &&
1790                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1791                     priv->enable)
1792                         cnt++;
1793         }
1794
1795         return cnt;
1796 }
1797
1798 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1799 {
1800         struct hclge_priv_buf *priv;
1801         u32 rx_priv = 0;
1802         int i;
1803
1804         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1805                 priv = &buf_alloc->priv_buf[i];
1806                 if (priv->enable)
1807                         rx_priv += priv->buf_size;
1808         }
1809         return rx_priv;
1810 }
1811
1812 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1813 {
1814         u32 i, total_tx_size = 0;
1815
1816         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1817                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1818
1819         return total_tx_size;
1820 }
1821
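     /* check whether the packet buffer left after the per-TC private RX
      * buffers is large enough for the shared buffer; if so, record the
      * shared buffer size, its waterlines and the per-TC thresholds
      */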
1822 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1823                                 struct hclge_pkt_buf_alloc *buf_alloc,
1824                                 u32 rx_all)
1825 {
1826         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1827         u32 tc_num = hclge_get_tc_num(hdev);
1828         u32 shared_buf, aligned_mps;
1829         u32 rx_priv;
1830         int i;
1831
1832         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1833
1834         if (hnae3_dev_dcb_supported(hdev))
1835                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1836                                         hdev->dv_buf_size;
1837         else
1838                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1839                                         + hdev->dv_buf_size;
1840
1841         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1842         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1843                              HCLGE_BUF_SIZE_UNIT);
1844
1845         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1846         if (rx_all < rx_priv + shared_std)
1847                 return false;
1848
1849         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1850         buf_alloc->s_buf.buf_size = shared_buf;
1851         if (hnae3_dev_dcb_supported(hdev)) {
1852                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1853                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1854                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1855                                   HCLGE_BUF_SIZE_UNIT);
1856         } else {
1857                 buf_alloc->s_buf.self.high = aligned_mps +
1858                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1859                 buf_alloc->s_buf.self.low = aligned_mps;
1860         }
1861
1862         if (hnae3_dev_dcb_supported(hdev)) {
1863                 hi_thrd = shared_buf - hdev->dv_buf_size;
1864
1865                 if (tc_num <= NEED_RESERVE_TC_NUM)
1866                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1867                                         / BUF_MAX_PERCENT;
1868
1869                 if (tc_num)
1870                         hi_thrd = hi_thrd / tc_num;
1871
1872                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1873                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1874                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1875         } else {
1876                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1877                 lo_thrd = aligned_mps;
1878         }
1879
1880         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1881                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1882                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1883         }
1884
1885         return true;
1886 }
1887
1888 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1889                                 struct hclge_pkt_buf_alloc *buf_alloc)
1890 {
1891         u32 i, total_size;
1892
1893         total_size = hdev->pkt_buf_size;
1894
1895         /* alloc tx buffer for all enabled TCs */
1896         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1897                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1898
1899                 if (hdev->hw_tc_map & BIT(i)) {
1900                         if (total_size < hdev->tx_buf_size)
1901                                 return -ENOMEM;
1902
1903                         priv->tx_buf_size = hdev->tx_buf_size;
1904                 } else {
1905                         priv->tx_buf_size = 0;
1906                 }
1907
1908                 total_size -= priv->tx_buf_size;
1909         }
1910
1911         return 0;
1912 }
1913
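     /* assign a private RX buffer and waterlines to every enabled TC, using
      * either the larger ("max") or the smaller watermark scheme, then check
      * whether the layout still fits in the packet buffer
      */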
1914 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1915                                   struct hclge_pkt_buf_alloc *buf_alloc)
1916 {
1917         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1918         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1919         unsigned int i;
1920
1921         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1922                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1923
1924                 priv->enable = 0;
1925                 priv->wl.low = 0;
1926                 priv->wl.high = 0;
1927                 priv->buf_size = 0;
1928
1929                 if (!(hdev->hw_tc_map & BIT(i)))
1930                         continue;
1931
1932                 priv->enable = 1;
1933
1934                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1935                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1936                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1937                                                 HCLGE_BUF_SIZE_UNIT);
1938                 } else {
1939                         priv->wl.low = 0;
1940                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1941                                         aligned_mps;
1942                 }
1943
1944                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1945         }
1946
1947         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1948 }
1949
1950 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1951                                           struct hclge_pkt_buf_alloc *buf_alloc)
1952 {
1953         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1954         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1955         int i;
1956
1957         /* start clearing from the last TC */
1958         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1959                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1960                 unsigned int mask = BIT((unsigned int)i);
1961
1962                 if (hdev->hw_tc_map & mask &&
1963                     !(hdev->tm_info.hw_pfc_map & mask)) {
1964                         /* Clear the private buffer of TCs without PFC */
1965                         priv->wl.low = 0;
1966                         priv->wl.high = 0;
1967                         priv->buf_size = 0;
1968                         priv->enable = 0;
1969                         no_pfc_priv_num--;
1970                 }
1971
1972                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1973                     no_pfc_priv_num == 0)
1974                         break;
1975         }
1976
1977         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1978 }
1979
1980 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1981                                         struct hclge_pkt_buf_alloc *buf_alloc)
1982 {
1983         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1984         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1985         int i;
1986
1987         /* start clearing from the last TC */
1988         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1989                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1990                 unsigned int mask = BIT((unsigned int)i);
1991
1992                 if (hdev->hw_tc_map & mask &&
1993                     hdev->tm_info.hw_pfc_map & mask) {
1994                         /* Reduce the number of PFC TCs with a private buffer */
1995                         priv->wl.low = 0;
1996                         priv->enable = 0;
1997                         priv->wl.high = 0;
1998                         priv->buf_size = 0;
1999                         pfc_priv_num--;
2000                 }
2001
2002                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2003                     pfc_priv_num == 0)
2004                         break;
2005         }
2006
2007         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2008 }
2009
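     /* try to carve the remaining RX packet buffer into per-TC private
      * buffers only, leaving no shared buffer; fails when the per-TC share
      * would drop below the required minimum
      */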
2010 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2011                                       struct hclge_pkt_buf_alloc *buf_alloc)
2012 {
2013 #define COMPENSATE_BUFFER       0x3C00
2014 #define COMPENSATE_HALF_MPS_NUM 5
2015 #define PRIV_WL_GAP             0x1800
2016
2017         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2018         u32 tc_num = hclge_get_tc_num(hdev);
2019         u32 half_mps = hdev->mps >> 1;
2020         u32 min_rx_priv;
2021         unsigned int i;
2022
2023         if (tc_num)
2024                 rx_priv = rx_priv / tc_num;
2025
2026         if (tc_num <= NEED_RESERVE_TC_NUM)
2027                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2028
2029         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2030                         COMPENSATE_HALF_MPS_NUM * half_mps;
2031         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2032         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2033
2034         if (rx_priv < min_rx_priv)
2035                 return false;
2036
2037         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2038                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2039
2040                 priv->enable = 0;
2041                 priv->wl.low = 0;
2042                 priv->wl.high = 0;
2043                 priv->buf_size = 0;
2044
2045                 if (!(hdev->hw_tc_map & BIT(i)))
2046                         continue;
2047
2048                 priv->enable = 1;
2049                 priv->buf_size = rx_priv;
2050                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2051                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2052         }
2053
2054         buf_alloc->s_buf.buf_size = 0;
2055
2056         return true;
2057 }
2058
2059 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2060  * @hdev: pointer to struct hclge_dev
2061  * @buf_alloc: pointer to buffer calculation data
2062  * @return: 0: calculation successful, negative: fail
2063  */
2064 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2065                                 struct hclge_pkt_buf_alloc *buf_alloc)
2066 {
2067         /* When DCB is not supported, rx private buffer is not allocated. */
2068         if (!hnae3_dev_dcb_supported(hdev)) {
2069                 u32 rx_all = hdev->pkt_buf_size;
2070
2071                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2072                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2073                         return -ENOMEM;
2074
2075                 return 0;
2076         }
2077
2078         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2079                 return 0;
2080
2081         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2082                 return 0;
2083
2084         /* try to decrease the buffer size */
2085         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2086                 return 0;
2087
2088         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2089                 return 0;
2090
2091         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2092                 return 0;
2093
2094         return -ENOMEM;
2095 }
2096
2097 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2098                                    struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100         struct hclge_rx_priv_buff_cmd *req;
2101         struct hclge_desc desc;
2102         int ret;
2103         int i;
2104
2105         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2106         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2107
2108         /* Alloc a private buffer for each TC */
2109         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2110                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2111
2112                 req->buf_num[i] =
2113                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2114                 req->buf_num[i] |=
2115                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2116         }
2117
2118         req->shared_buf =
2119                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2120                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2121
2122         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2123         if (ret)
2124                 dev_err(&hdev->pdev->dev,
2125                         "rx private buffer alloc cmd failed %d\n", ret);
2126
2127         return ret;
2128 }
2129
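     /* program the per-TC private RX buffer waterlines into hardware, split
      * across two command descriptors
      */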
2130 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2131                                    struct hclge_pkt_buf_alloc *buf_alloc)
2132 {
2133         struct hclge_rx_priv_wl_buf *req;
2134         struct hclge_priv_buf *priv;
2135         struct hclge_desc desc[2];
2136         int i, j;
2137         int ret;
2138
2139         for (i = 0; i < 2; i++) {
2140                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2141                                            false);
2142                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2143
2144                 /* The first descriptor sets the NEXT bit to 1 */
2145                 if (i == 0)
2146                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2147                 else
2148                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149
2150                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2151                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2152
2153                         priv = &buf_alloc->priv_buf[idx];
2154                         req->tc_wl[j].high =
2155                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2156                         req->tc_wl[j].high |=
2157                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2158                         req->tc_wl[j].low =
2159                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2160                         req->tc_wl[j].low |=
2161                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2162                 }
2163         }
2164
2165         /* Send 2 descriptors at one time */
2166         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2167         if (ret)
2168                 dev_err(&hdev->pdev->dev,
2169                         "rx private waterline config cmd failed %d\n",
2170                         ret);
2171         return ret;
2172 }
2173
2174 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2175                                     struct hclge_pkt_buf_alloc *buf_alloc)
2176 {
2177         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2178         struct hclge_rx_com_thrd *req;
2179         struct hclge_desc desc[2];
2180         struct hclge_tc_thrd *tc;
2181         int i, j;
2182         int ret;
2183
2184         for (i = 0; i < 2; i++) {
2185                 hclge_cmd_setup_basic_desc(&desc[i],
2186                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2187                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2188
2189                 /* The first descriptor sets the NEXT bit to 1 */
2190                 if (i == 0)
2191                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2192                 else
2193                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194
2195                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2196                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2197
2198                         req->com_thrd[j].high =
2199                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2200                         req->com_thrd[j].high |=
2201                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202                         req->com_thrd[j].low =
2203                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2204                         req->com_thrd[j].low |=
2205                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2206                 }
2207         }
2208
2209         /* Send 2 descriptors at one time */
2210         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2211         if (ret)
2212                 dev_err(&hdev->pdev->dev,
2213                         "common threshold config cmd failed %d\n", ret);
2214         return ret;
2215 }
2216
2217 static int hclge_common_wl_config(struct hclge_dev *hdev,
2218                                   struct hclge_pkt_buf_alloc *buf_alloc)
2219 {
2220         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2221         struct hclge_rx_com_wl *req;
2222         struct hclge_desc desc;
2223         int ret;
2224
2225         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2226
2227         req = (struct hclge_rx_com_wl *)desc.data;
2228         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2229         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2230
2231         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2232         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2233
2234         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2235         if (ret)
2236                 dev_err(&hdev->pdev->dev,
2237                         "common waterline config cmd failed %d\n", ret);
2238
2239         return ret;
2240 }
2241
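     /* calculate the TX/RX packet buffer division and program it into
      * hardware: TX buffers, per-TC private RX buffers, shared buffer
      * waterlines and thresholds
      */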
2242 int hclge_buffer_alloc(struct hclge_dev *hdev)
2243 {
2244         struct hclge_pkt_buf_alloc *pkt_buf;
2245         int ret;
2246
2247         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2248         if (!pkt_buf)
2249                 return -ENOMEM;
2250
2251         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2252         if (ret) {
2253                 dev_err(&hdev->pdev->dev,
2254                         "could not calc tx buffer size for all TCs %d\n", ret);
2255                 goto out;
2256         }
2257
2258         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2259         if (ret) {
2260                 dev_err(&hdev->pdev->dev,
2261                         "could not alloc tx buffers %d\n", ret);
2262                 goto out;
2263         }
2264
2265         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2266         if (ret) {
2267                 dev_err(&hdev->pdev->dev,
2268                         "could not calc rx priv buffer size for all TCs %d\n",
2269                         ret);
2270                 goto out;
2271         }
2272
2273         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2274         if (ret) {
2275                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2276                         ret);
2277                 goto out;
2278         }
2279
2280         if (hnae3_dev_dcb_supported(hdev)) {
2281                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2282                 if (ret) {
2283                         dev_err(&hdev->pdev->dev,
2284                                 "could not configure rx private waterline %d\n",
2285                                 ret);
2286                         goto out;
2287                 }
2288
2289                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2290                 if (ret) {
2291                         dev_err(&hdev->pdev->dev,
2292                                 "could not configure common threshold %d\n",
2293                                 ret);
2294                         goto out;
2295                 }
2296         }
2297
2298         ret = hclge_common_wl_config(hdev, pkt_buf);
2299         if (ret)
2300                 dev_err(&hdev->pdev->dev,
2301                         "could not configure common waterline %d\n", ret);
2302
2303 out:
2304         kfree(pkt_buf);
2305         return ret;
2306 }
2307
2308 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2309 {
2310         struct hnae3_handle *roce = &vport->roce;
2311         struct hnae3_handle *nic = &vport->nic;
2312
2313         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2314
2315         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2316             vport->back->num_msi_left == 0)
2317                 return -EINVAL;
2318
2319         roce->rinfo.base_vector = vport->back->roce_base_vector;
2320
2321         roce->rinfo.netdev = nic->kinfo.netdev;
2322         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2323
2324         roce->pdev = nic->pdev;
2325         roce->ae_algo = nic->ae_algo;
2326         roce->numa_node_mask = nic->numa_node_mask;
2327
2328         return 0;
2329 }
2330
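     /* allocate MSI/MSI-X interrupt vectors and the arrays used to track how
      * each vector is assigned
      */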
2331 static int hclge_init_msi(struct hclge_dev *hdev)
2332 {
2333         struct pci_dev *pdev = hdev->pdev;
2334         int vectors;
2335         int i;
2336
2337         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2338                                         hdev->num_msi,
2339                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2340         if (vectors < 0) {
2341                 dev_err(&pdev->dev,
2342                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2343                         vectors);
2344                 return vectors;
2345         }
2346         if (vectors < hdev->num_msi)
2347                 dev_warn(&hdev->pdev->dev,
2348                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2349                          hdev->num_msi, vectors);
2350
2351         hdev->num_msi = vectors;
2352         hdev->num_msi_left = vectors;
2353
2354         hdev->base_msi_vector = pdev->irq;
2355         hdev->roce_base_vector = hdev->base_msi_vector +
2356                                 hdev->roce_base_msix_offset;
2357
2358         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2359                                            sizeof(u16), GFP_KERNEL);
2360         if (!hdev->vector_status) {
2361                 pci_free_irq_vectors(pdev);
2362                 return -ENOMEM;
2363         }
2364
2365         for (i = 0; i < hdev->num_msi; i++)
2366                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2367
2368         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2369                                         sizeof(int), GFP_KERNEL);
2370         if (!hdev->vector_irq) {
2371                 pci_free_irq_vectors(pdev);
2372                 return -ENOMEM;
2373         }
2374
2375         return 0;
2376 }
2377
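     /* half duplex is only valid at 10M/100M; force full duplex otherwise */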
2378 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2379 {
2380         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2381                 duplex = HCLGE_MAC_FULL;
2382
2383         return duplex;
2384 }
2385
2386 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2387                                       u8 duplex)
2388 {
2389         struct hclge_config_mac_speed_dup_cmd *req;
2390         struct hclge_desc desc;
2391         int ret;
2392
2393         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2394
2395         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2396
2397         if (duplex)
2398                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2399
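             /* map the requested speed to the firmware speed encoding */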
2400         switch (speed) {
2401         case HCLGE_MAC_SPEED_10M:
2402                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403                                 HCLGE_CFG_SPEED_S, 6);
2404                 break;
2405         case HCLGE_MAC_SPEED_100M:
2406                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407                                 HCLGE_CFG_SPEED_S, 7);
2408                 break;
2409         case HCLGE_MAC_SPEED_1G:
2410                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411                                 HCLGE_CFG_SPEED_S, 0);
2412                 break;
2413         case HCLGE_MAC_SPEED_10G:
2414                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415                                 HCLGE_CFG_SPEED_S, 1);
2416                 break;
2417         case HCLGE_MAC_SPEED_25G:
2418                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419                                 HCLGE_CFG_SPEED_S, 2);
2420                 break;
2421         case HCLGE_MAC_SPEED_40G:
2422                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423                                 HCLGE_CFG_SPEED_S, 3);
2424                 break;
2425         case HCLGE_MAC_SPEED_50G:
2426                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427                                 HCLGE_CFG_SPEED_S, 4);
2428                 break;
2429         case HCLGE_MAC_SPEED_100G:
2430                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2431                                 HCLGE_CFG_SPEED_S, 5);
2432                 break;
2433         default:
2434                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2435                 return -EINVAL;
2436         }
2437
2438         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2439                       1);
2440
2441         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2442         if (ret) {
2443                 dev_err(&hdev->pdev->dev,
2444                         "mac speed/duplex config cmd failed %d.\n", ret);
2445                 return ret;
2446         }
2447
2448         return 0;
2449 }
2450
2451 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2452 {
2453         int ret;
2454
2455         duplex = hclge_check_speed_dup(duplex, speed);
2456         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2457                 return 0;
2458
2459         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2460         if (ret)
2461                 return ret;
2462
2463         hdev->hw.mac.speed = speed;
2464         hdev->hw.mac.duplex = duplex;
2465
2466         return 0;
2467 }
2468
2469 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2470                                      u8 duplex)
2471 {
2472         struct hclge_vport *vport = hclge_get_vport(handle);
2473         struct hclge_dev *hdev = vport->back;
2474
2475         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2476 }
2477
2478 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2479 {
2480         struct hclge_config_auto_neg_cmd *req;
2481         struct hclge_desc desc;
2482         u32 flag = 0;
2483         int ret;
2484
2485         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2486
2487         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2488         if (enable)
2489                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2490         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2491
2492         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2493         if (ret)
2494                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2495                         ret);
2496
2497         return ret;
2498 }
2499
2500 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2501 {
2502         struct hclge_vport *vport = hclge_get_vport(handle);
2503         struct hclge_dev *hdev = vport->back;
2504
2505         if (!hdev->hw.mac.support_autoneg) {
2506                 if (enable) {
2507                         dev_err(&hdev->pdev->dev,
2508                                 "autoneg is not supported by current port\n");
2509                         return -EOPNOTSUPP;
2510                 } else {
2511                         return 0;
2512                 }
2513         }
2514
2515         return hclge_set_autoneg_en(hdev, enable);
2516 }
2517
2518 static int hclge_get_autoneg(struct hnae3_handle *handle)
2519 {
2520         struct hclge_vport *vport = hclge_get_vport(handle);
2521         struct hclge_dev *hdev = vport->back;
2522         struct phy_device *phydev = hdev->hw.mac.phydev;
2523
2524         if (phydev)
2525                 return phydev->autoneg;
2526
2527         return hdev->hw.mac.autoneg;
2528 }
2529
2530 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2531 {
2532         struct hclge_vport *vport = hclge_get_vport(handle);
2533         struct hclge_dev *hdev = vport->back;
2534         int ret;
2535
2536         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2537
2538         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2539         if (ret)
2540                 return ret;
2541         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2542 }
2543
2544 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2545 {
2546         struct hclge_vport *vport = hclge_get_vport(handle);
2547         struct hclge_dev *hdev = vport->back;
2548
2549         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2550                 return hclge_set_autoneg_en(hdev, !halt);
2551
2552         return 0;
2553 }
2554
2555 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2556 {
2557         struct hclge_config_fec_cmd *req;
2558         struct hclge_desc desc;
2559         int ret;
2560
2561         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2562
2563         req = (struct hclge_config_fec_cmd *)desc.data;
2564         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2565                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2566         if (fec_mode & BIT(HNAE3_FEC_RS))
2567                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2568                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2569         if (fec_mode & BIT(HNAE3_FEC_BASER))
2570                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2571                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2572
2573         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2574         if (ret)
2575                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2576
2577         return ret;
2578 }
2579
2580 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2581 {
2582         struct hclge_vport *vport = hclge_get_vport(handle);
2583         struct hclge_dev *hdev = vport->back;
2584         struct hclge_mac *mac = &hdev->hw.mac;
2585         int ret;
2586
2587         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2588                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2589                 return -EINVAL;
2590         }
2591
2592         ret = hclge_set_fec_hw(hdev, fec_mode);
2593         if (ret)
2594                 return ret;
2595
2596         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2597         return 0;
2598 }
2599
2600 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2601                           u8 *fec_mode)
2602 {
2603         struct hclge_vport *vport = hclge_get_vport(handle);
2604         struct hclge_dev *hdev = vport->back;
2605         struct hclge_mac *mac = &hdev->hw.mac;
2606
2607         if (fec_ability)
2608                 *fec_ability = mac->fec_ability;
2609         if (fec_mode)
2610                 *fec_mode = mac->fec_mode;
2611 }
2612
2613 static int hclge_mac_init(struct hclge_dev *hdev)
2614 {
2615         struct hclge_mac *mac = &hdev->hw.mac;
2616         int ret;
2617
2618         hdev->support_sfp_query = true;
2619         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2620         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2621                                          hdev->hw.mac.duplex);
2622         if (ret) {
2623                 dev_err(&hdev->pdev->dev,
2624                         "Config mac speed dup fail ret=%d\n", ret);
2625                 return ret;
2626         }
2627
2628         if (hdev->hw.mac.support_autoneg) {
2629                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2630                 if (ret) {
2631                         dev_err(&hdev->pdev->dev,
2632                                 "Config mac autoneg fail ret=%d\n", ret);
2633                         return ret;
2634                 }
2635         }
2636
2637         mac->link = 0;
2638
2639         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2640                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2641                 if (ret) {
2642                         dev_err(&hdev->pdev->dev,
2643                                 "Fec mode init fail, ret = %d\n", ret);
2644                         return ret;
2645                 }
2646         }
2647
2648         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2649         if (ret) {
2650                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2651                 return ret;
2652         }
2653
2654         ret = hclge_set_default_loopback(hdev);
2655         if (ret)
2656                 return ret;
2657
2658         ret = hclge_buffer_alloc(hdev);
2659         if (ret)
2660                 dev_err(&hdev->pdev->dev,
2661                         "allocate buffer fail, ret=%d\n", ret);
2662
2663         return ret;
2664 }
2665
2666 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2667 {
2668         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2669             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2670                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2671                               &hdev->mbx_service_task);
2672 }
2673
2674 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2675 {
2676         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2677             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2678                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2679                               &hdev->rst_service_task);
2680 }
2681
2682 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2683 {
2684         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2685             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2686             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2687                 hdev->hw_stats.stats_timer++;
2688                 hdev->fd_arfs_expire_timer++;
2689                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2690                                     system_wq, &hdev->service_task,
2691                                     delay_time);
2692         }
2693 }
2694
2695 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2696 {
2697         struct hclge_link_status_cmd *req;
2698         struct hclge_desc desc;
2699         int link_status;
2700         int ret;
2701
2702         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2703         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2704         if (ret) {
2705                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2706                         ret);
2707                 return ret;
2708         }
2709
2710         req = (struct hclge_link_status_cmd *)desc.data;
2711         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2712
2713         return !!link_status;
2714 }
2715
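     /* the link is reported up only when the MAC link is up and, if a PHY is
      * attached, the PHY is running and reports link up as well
      */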
2716 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2717 {
2718         unsigned int mac_state;
2719         int link_stat;
2720
2721         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2722                 return 0;
2723
2724         mac_state = hclge_get_mac_link_status(hdev);
2725
2726         if (hdev->hw.mac.phydev) {
2727                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2728                         link_stat = mac_state &
2729                                 hdev->hw.mac.phydev->link;
2730                 else
2731                         link_stat = 0;
2732
2733         } else {
2734                 link_stat = mac_state;
2735         }
2736
2737         return !!link_stat;
2738 }
2739
2740 static void hclge_update_link_status(struct hclge_dev *hdev)
2741 {
2742         struct hnae3_client *rclient = hdev->roce_client;
2743         struct hnae3_client *client = hdev->nic_client;
2744         struct hnae3_handle *rhandle;
2745         struct hnae3_handle *handle;
2746         int state;
2747         int i;
2748
2749         if (!client)
2750                 return;
2751         state = hclge_get_mac_phy_link(hdev);
2752         if (state != hdev->hw.mac.link) {
2753                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2754                         handle = &hdev->vport[i].nic;
2755                         client->ops->link_status_change(handle, state);
2756                         hclge_config_mac_tnl_int(hdev, state);
2757                         rhandle = &hdev->vport[i].roce;
2758                         if (rclient && rclient->ops->link_status_change)
2759                                 rclient->ops->link_status_change(rhandle,
2760                                                                  state);
2761                 }
2762                 hdev->hw.mac.link = state;
2763         }
2764 }
2765
2766 static void hclge_update_port_capability(struct hclge_mac *mac)
2767 {
2768         /* update fec ability by speed */
2769         hclge_convert_setting_fec(mac);
2770
2771         /* firmware cannot identify the backplane type; the media type
2772          * read from the configuration can help deal with it
2773          */
2774         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2775             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2776                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2777         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2778                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2779
2780         if (mac->support_autoneg) {
2781                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2782                 linkmode_copy(mac->advertising, mac->supported);
2783         } else {
2784                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2785                                    mac->supported);
2786                 linkmode_zero(mac->advertising);
2787         }
2788 }
2789
2790 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2791 {
2792         struct hclge_sfp_info_cmd *resp;
2793         struct hclge_desc desc;
2794         int ret;
2795
2796         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2797         resp = (struct hclge_sfp_info_cmd *)desc.data;
2798         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2799         if (ret == -EOPNOTSUPP) {
2800                 dev_warn(&hdev->pdev->dev,
2801                          "IMP do not support get SFP speed %d\n", ret);
2802                 return ret;
2803         } else if (ret) {
2804                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2805                 return ret;
2806         }
2807
2808         *speed = le32_to_cpu(resp->speed);
2809
2810         return 0;
2811 }
2812
2813 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2814 {
2815         struct hclge_sfp_info_cmd *resp;
2816         struct hclge_desc desc;
2817         int ret;
2818
2819         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2820         resp = (struct hclge_sfp_info_cmd *)desc.data;
2821
2822         resp->query_type = QUERY_ACTIVE_SPEED;
2823
2824         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2825         if (ret == -EOPNOTSUPP) {
2826                 dev_warn(&hdev->pdev->dev,
2827                          "IMP does not support get SFP info %d\n", ret);
2828                 return ret;
2829         } else if (ret) {
2830                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2831                 return ret;
2832         }
2833
2834         mac->speed = le32_to_cpu(resp->speed);
2835         /* if resp->speed_ability is 0, it means this is old firmware that
2836          * does not report these params, so do not update them
2837          */
2838         if (resp->speed_ability) {
2839                 mac->module_type = le32_to_cpu(resp->module_type);
2840                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2841                 mac->autoneg = resp->autoneg;
2842                 mac->support_autoneg = resp->autoneg_ability;
2843                 mac->speed_type = QUERY_ACTIVE_SPEED;
2844                 if (!resp->active_fec)
2845                         mac->fec_mode = 0;
2846                 else
2847                         mac->fec_mode = BIT(resp->active_fec);
2848         } else {
2849                 mac->speed_type = QUERY_SFP_SPEED;
2850         }
2851
2852         return 0;
2853 }
2854
2855 static int hclge_update_port_info(struct hclge_dev *hdev)
2856 {
2857         struct hclge_mac *mac = &hdev->hw.mac;
2858         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2859         int ret;
2860
2861         /* get the port info from SFP cmd if not copper port */
2862         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2863                 return 0;
2864
2865         /* if IMP does not support get SFP/qSFP info, return directly */
2866         if (!hdev->support_sfp_query)
2867                 return 0;
2868
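	/* revision 0x21 and later can report full SFP info (speed ability,
	 * autoneg and FEC); older revisions only report the SFP speed
	 */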
2869         if (hdev->pdev->revision >= 0x21)
2870                 ret = hclge_get_sfp_info(hdev, mac);
2871         else
2872                 ret = hclge_get_sfp_speed(hdev, &speed);
2873
2874         if (ret == -EOPNOTSUPP) {
2875                 hdev->support_sfp_query = false;
2876                 return ret;
2877         } else if (ret) {
2878                 return ret;
2879         }
2880
2881         if (hdev->pdev->revision >= 0x21) {
2882                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2883                         hclge_update_port_capability(mac);
2884                         return 0;
2885                 }
2886                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2887                                                HCLGE_MAC_FULL);
2888         } else {
2889                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2890                         return 0; /* do nothing if no SFP */
2891
2892                 /* must config full duplex for SFP */
2893                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2894         }
2895 }
2896
2897 static int hclge_get_status(struct hnae3_handle *handle)
2898 {
2899         struct hclge_vport *vport = hclge_get_vport(handle);
2900         struct hclge_dev *hdev = vport->back;
2901
2902         hclge_update_link_status(hdev);
2903
2904         return hdev->hw.mac.link;
2905 }
2906
2907 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2908 {
2909         if (pci_num_vf(hdev->pdev) == 0) {
2910                 dev_err(&hdev->pdev->dev,
2911                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2912                 return NULL;
2913         }
2914
2915         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2916                 dev_err(&hdev->pdev->dev,
2917                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2918                         vf, pci_num_vf(hdev->pdev));
2919                 return NULL;
2920         }
2921
2922         /* VF vports start from index 1; vport 0 is the PF */
2923         vf += HCLGE_VF_VPORT_START_NUM;
2924         return &hdev->vport[vf];
2925 }
2926
2927 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2928                                struct ifla_vf_info *ivf)
2929 {
2930         struct hclge_vport *vport = hclge_get_vport(handle);
2931         struct hclge_dev *hdev = vport->back;
2932
2933         vport = hclge_get_vf_vport(hdev, vf);
2934         if (!vport)
2935                 return -EINVAL;
2936
2937         ivf->vf = vf;
2938         ivf->linkstate = vport->vf_info.link_state;
2939         ivf->spoofchk = vport->vf_info.spoofchk;
2940         ivf->trusted = vport->vf_info.trusted;
2941         ivf->min_tx_rate = 0;
2942         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2943         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2944
2945         return 0;
2946 }
2947
2948 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2949                                    int link_state)
2950 {
2951         struct hclge_vport *vport = hclge_get_vport(handle);
2952         struct hclge_dev *hdev = vport->back;
2953
2954         vport = hclge_get_vf_vport(hdev, vf);
2955         if (!vport)
2956                 return -EINVAL;
2957
2958         vport->vf_info.link_state = link_state;
2959
2960         return 0;
2961 }
2962
2963 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2964 {
2965         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2966
2967         /* fetch the events from their corresponding regs */
2968         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2969         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2970         msix_src_reg = hclge_read_dev(&hdev->hw,
2971                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2972
2973         /* Assumption: if reset and mailbox events are reported together,
2974          * only the reset event is processed in this pass and the mailbox
2975          * events are deferred. Since the RX CMDQ event has not been cleared
2976          * this time, the hardware will raise another interrupt just for the
2977          * mailbox.
2978          *
2979          * check for vector0 reset event sources
2980          */
2981         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2982                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2983                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2984                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2985                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2986                 hdev->rst_stats.imp_rst_cnt++;
2987                 return HCLGE_VECTOR0_EVENT_RST;
2988         }
2989
2990         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2991                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2992                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2993                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2994                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2995                 hdev->rst_stats.global_rst_cnt++;
2996                 return HCLGE_VECTOR0_EVENT_RST;
2997         }
2998
2999         /* check for vector0 msix event source */
3000         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3001                 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
3002                          msix_src_reg);
3003                 *clearval = msix_src_reg;
3004                 return HCLGE_VECTOR0_EVENT_ERR;
3005         }
3006
3007         /* check for vector0 mailbox(=CMDQ RX) event source */
3008         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3009                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3010                 *clearval = cmdq_src_reg;
3011                 return HCLGE_VECTOR0_EVENT_MBX;
3012         }
3013
3014         /* print other vector0 event source */
3015         dev_info(&hdev->pdev->dev,
3016                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3017                  cmdq_src_reg, msix_src_reg);
3018         *clearval = msix_src_reg;
3019
3020         return HCLGE_VECTOR0_EVENT_OTHER;
3021 }
3022
3023 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3024                                     u32 regclr)
3025 {
3026         switch (event_type) {
3027         case HCLGE_VECTOR0_EVENT_RST:
3028                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3029                 break;
3030         case HCLGE_VECTOR0_EVENT_MBX:
3031                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3032                 break;
3033         default:
3034                 break;
3035         }
3036 }
3037
3038 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3039 {
3040         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3041                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3042                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3043                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3044         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3045 }
3046
3047 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3048 {
3049         writel(enable ? 1 : 0, vector->addr);
3050 }
3051
3052 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3053 {
3054         struct hclge_dev *hdev = data;
3055         u32 clearval = 0;
3056         u32 event_cause;
3057
3058         hclge_enable_vector(&hdev->misc_vector, false);
3059         event_cause = hclge_check_event_cause(hdev, &clearval);
3060
3061         /* vector 0 interrupt is shared with reset and mailbox source events. */
3062         switch (event_cause) {
3063         case HCLGE_VECTOR0_EVENT_ERR:
3064                 /* we do not know what type of reset is required now. This can
3065                  * only be decided after we fetch the type of errors which
3066                  * caused this event. Therefore, for now we do the following:
3067                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
3068                  *    actual reset type to be used is deferred.
3069                  * 2. Schedule the reset service task.
3070                  * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3071                  *    will fetch the correct type of reset. This is done by
3072                  *    first decoding the types of errors.
3073                  */
3074                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3075                 /* fall through */
3076         case HCLGE_VECTOR0_EVENT_RST:
3077                 hclge_reset_task_schedule(hdev);
3078                 break;
3079         case HCLGE_VECTOR0_EVENT_MBX:
3080                 /* If we are here then either:
3081                  * 1. we are not handling any mbx task and no mbx task is
3082                  *    scheduled,
3083                  *                        OR
3084                  * 2. we are handling a mbx task but nothing more is
3085                  *    scheduled.
3086                  * In both cases, schedule the mbx task, as there are more
3087                  * mbx messages reported by this interrupt.
3088                  */
3089                 hclge_mbx_task_schedule(hdev);
3090                 break;
3091         default:
3092                 dev_warn(&hdev->pdev->dev,
3093                          "received unknown or unhandled event of vector0\n");
3094                 break;
3095         }
3096
3097         hclge_clear_event_cause(hdev, event_cause, clearval);
3098
3099         /* Re-enable the interrupt if the event was not caused by reset.
3100          * When clearval is 0, the interrupt status may have been cleared
3101          * by hardware before the driver read the status register; in that
3102          * case the vector0 interrupt should also be re-enabled.
3103          */
3104         if (!clearval ||
3105             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3106                 hclge_enable_vector(&hdev->misc_vector, true);
3107         }
3108
3109         return IRQ_HANDLED;
3110 }
3111
3112 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3113 {
3114         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3115                 dev_warn(&hdev->pdev->dev,
3116                          "vector(vector_id %d) has been freed.\n", vector_id);
3117                 return;
3118         }
3119
3120         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3121         hdev->num_msi_left += 1;
3122         hdev->num_msi_used -= 1;
3123 }
3124
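/* MSI-X vector 0 is reserved for the misc interrupt that reports reset,
 * mailbox and error events; it is accounted for in num_msi_used/num_msi_left
 * just like the vectors handed out to the vports.
 */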
3125 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3126 {
3127         struct hclge_misc_vector *vector = &hdev->misc_vector;
3128
3129         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3130
3131         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3132         hdev->vector_status[0] = 0;
3133
3134         hdev->num_msi_left -= 1;
3135         hdev->num_msi_used += 1;
3136 }
3137
3138 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3139                                       const cpumask_t *mask)
3140 {
3141         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3142                                               affinity_notify);
3143
3144         cpumask_copy(&hdev->affinity_mask, mask);
3145 }
3146
3147 static void hclge_irq_affinity_release(struct kref *ref)
3148 {
3149 }
3150
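/* Pin the misc vector to the configured affinity mask and register a
 * notifier so that hdev->affinity_mask tracks affinity changes; the
 * periodic service task is scheduled on a CPU from this mask.
 */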
3151 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3152 {
3153         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3154                               &hdev->affinity_mask);
3155
3156         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3157         hdev->affinity_notify.release = hclge_irq_affinity_release;
3158         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3159                                   &hdev->affinity_notify);
3160 }
3161
3162 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3163 {
3164         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3165         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3166 }
3167
3168 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3169 {
3170         int ret;
3171
3172         hclge_get_misc_vector(hdev);
3173
3174         /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3175         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3176                           0, "hclge_misc", hdev);
3177         if (ret) {
3178                 hclge_free_vector(hdev, 0);
3179                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3180                         hdev->misc_vector.vector_irq);
3181         }
3182
3183         return ret;
3184 }
3185
3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3187 {
3188         free_irq(hdev->misc_vector.vector_irq, hdev);
3189         hclge_free_vector(hdev, 0);
3190 }
3191
3192 int hclge_notify_client(struct hclge_dev *hdev,
3193                         enum hnae3_reset_notify_type type)
3194 {
3195         struct hnae3_client *client = hdev->nic_client;
3196         u16 i;
3197
3198         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3199                 return 0;
3200
3201         if (!client->ops->reset_notify)
3202                 return -EOPNOTSUPP;
3203
3204         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3205                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3206                 int ret;
3207
3208                 ret = client->ops->reset_notify(handle, type);
3209                 if (ret) {
3210                         dev_err(&hdev->pdev->dev,
3211                                 "notify nic client failed %d(%d)\n", type, ret);
3212                         return ret;
3213                 }
3214         }
3215
3216         return 0;
3217 }
3218
3219 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3220                                     enum hnae3_reset_notify_type type)
3221 {
3222         struct hnae3_client *client = hdev->roce_client;
3223         int ret = 0;
3224         u16 i;
3225
3226         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3227                 return 0;
3228
3229         if (!client->ops->reset_notify)
3230                 return -EOPNOTSUPP;
3231
3232         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3233                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3234
3235                 ret = client->ops->reset_notify(handle, type);
3236                 if (ret) {
3237                         dev_err(&hdev->pdev->dev,
3238                                 "notify roce client failed %d(%d)",
3239                                 type, ret);
3240                         return ret;
3241                 }
3242         }
3243
3244         return ret;
3245 }
3246
3247 static int hclge_reset_wait(struct hclge_dev *hdev)
3248 {
3249 #define HCLGE_RESET_WAIT_MS     100
3250 #define HCLGE_RESET_WAIT_CNT    200
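	/* poll the reset status every 100 ms, up to 200 times (20 s in total) */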
3251         u32 val, reg, reg_bit;
3252         u32 cnt = 0;
3253
3254         switch (hdev->reset_type) {
3255         case HNAE3_IMP_RESET:
3256                 reg = HCLGE_GLOBAL_RESET_REG;
3257                 reg_bit = HCLGE_IMP_RESET_BIT;
3258                 break;
3259         case HNAE3_GLOBAL_RESET:
3260                 reg = HCLGE_GLOBAL_RESET_REG;
3261                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3262                 break;
3263         case HNAE3_FUNC_RESET:
3264                 reg = HCLGE_FUN_RST_ING;
3265                 reg_bit = HCLGE_FUN_RST_ING_B;
3266                 break;
3267         case HNAE3_FLR_RESET:
3268                 break;
3269         default:
3270                 dev_err(&hdev->pdev->dev,
3271                         "Wait for unsupported reset type: %d\n",
3272                         hdev->reset_type);
3273                 return -EINVAL;
3274         }
3275
3276         if (hdev->reset_type == HNAE3_FLR_RESET) {
3277                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3278                        cnt++ < HCLGE_RESET_WAIT_CNT)
3279                         msleep(HCLGE_RESET_WAIT_MS);
3280
3281                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3282                         dev_err(&hdev->pdev->dev,
3283                                 "flr wait timeout: %u\n", cnt);
3284                         return -EBUSY;
3285                 }
3286
3287                 return 0;
3288         }
3289
3290         val = hclge_read_dev(&hdev->hw, reg);
3291         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3292                 msleep(HCLGE_RESET_WAIT_MS);
3293                 val = hclge_read_dev(&hdev->hw, reg);
3294                 cnt++;
3295         }
3296
3297         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3298                 dev_warn(&hdev->pdev->dev,
3299                          "Wait for reset timeout: %d\n", hdev->reset_type);
3300                 return -EBUSY;
3301         }
3302
3303         return 0;
3304 }
3305
3306 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3307 {
3308         struct hclge_vf_rst_cmd *req;
3309         struct hclge_desc desc;
3310
3311         req = (struct hclge_vf_rst_cmd *)desc.data;
3312         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3313         req->dest_vfid = func_id;
3314
3315         if (reset)
3316                 req->vf_rst = 0x1;
3317
3318         return hclge_cmd_send(&hdev->hw, &desc, 1);
3319 }
3320
3321 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3322 {
3323         int i;
3324
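	/* vport indices 0 to num_vmdq_vport belong to the PF, so the VF
	 * vports start at num_vmdq_vport + 1
	 */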
3325         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3326                 struct hclge_vport *vport = &hdev->vport[i];
3327                 int ret;
3328
3329                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3330                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3331                 if (ret) {
3332                         dev_err(&hdev->pdev->dev,
3333                                 "set vf(%u) rst failed %d!\n",
3334                                 vport->vport_id, ret);
3335                         return ret;
3336                 }
3337
3338                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3339                         continue;
3340
3341                 /* Inform VF to process the reset.
3342                  * hclge_inform_reset_assert_to_vf may fail if VF
3343                  * driver is not loaded.
3344                  */
3345                 ret = hclge_inform_reset_assert_to_vf(vport);
3346                 if (ret)
3347                         dev_warn(&hdev->pdev->dev,
3348                                  "inform reset to vf(%u) failed %d!\n",
3349                                  vport->vport_id, ret);
3350         }
3351
3352         return 0;
3353 }
3354
3355 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3356 {
3357         struct hclge_pf_rst_sync_cmd *req;
3358         struct hclge_desc desc;
3359         int cnt = 0;
3360         int ret;
3361
3362         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3363         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3364
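	/* poll until the firmware reports all running VFs ready, sleeping
	 * HCLGE_PF_RESET_SYNC_TIME ms between queries, for at most
	 * HCLGE_PF_RESET_SYNC_CNT attempts
	 */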
3365         do {
3366                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367                 /* for compatibility with old firmware, wait
3368                  * 100 ms for the VF to stop IO
3369                  */
3370                 if (ret == -EOPNOTSUPP) {
3371                         msleep(HCLGE_RESET_SYNC_TIME);
3372                         return 0;
3373                 } else if (ret) {
3374                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3375                                 ret);
3376                         return ret;
3377                 } else if (req->all_vf_ready) {
3378                         return 0;
3379                 }
3380                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3381                 hclge_cmd_reuse_desc(&desc, true);
3382         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3383
3384         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3385         return -ETIME;
3386 }
3387
3388 void hclge_report_hw_error(struct hclge_dev *hdev,
3389                            enum hnae3_hw_error_type type)
3390 {
3391         struct hnae3_client *client = hdev->nic_client;
3392         u16 i;
3393
3394         if (!client || !client->ops->process_hw_error ||
3395             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3396                 return;
3397
3398         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3399                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3400 }
3401
3402 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3403 {
3404         u32 reg_val;
3405
3406         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3407         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3408                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3409                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3410                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3411         }
3412
3413         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3414                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3415                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3416                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3417         }
3418 }
3419
3420 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3421 {
3422         struct hclge_desc desc;
3423         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3424         int ret;
3425
3426         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3427         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3428         req->fun_reset_vfid = func_id;
3429
3430         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3431         if (ret)
3432                 dev_err(&hdev->pdev->dev,
3433                         "send function reset cmd fail, status =%d\n", ret);
3434
3435         return ret;
3436 }
3437
3438 static void hclge_do_reset(struct hclge_dev *hdev)
3439 {
3440         struct hnae3_handle *handle = &hdev->vport[0].nic;
3441         struct pci_dev *pdev = hdev->pdev;
3442         u32 val;
3443
3444         if (hclge_get_hw_reset_stat(handle)) {
3445                 dev_info(&pdev->dev, "Hardware reset not finish\n");
3446                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3447                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3448                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3449                 return;
3450         }
3451
3452         switch (hdev->reset_type) {
3453         case HNAE3_GLOBAL_RESET:
3454                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3455                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3456                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3457                 dev_info(&pdev->dev, "Global Reset requested\n");
3458                 break;
3459         case HNAE3_FUNC_RESET:
3460                 dev_info(&pdev->dev, "PF Reset requested\n");
3461                 /* schedule again to check later */
3462                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3463                 hclge_reset_task_schedule(hdev);
3464                 break;
3465         case HNAE3_FLR_RESET:
3466                 dev_info(&pdev->dev, "FLR requested\n");
3467                 /* schedule again to check later */
3468                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3469                 hclge_reset_task_schedule(hdev);
3470                 break;
3471         default:
3472                 dev_warn(&pdev->dev,
3473                          "Unsupported reset type: %d\n", hdev->reset_type);
3474                 break;
3475         }
3476 }
3477
3478 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3479                                                    unsigned long *addr)
3480 {
3481         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3482         struct hclge_dev *hdev = ae_dev->priv;
3483
3484         /* first, resolve any unknown reset type to the known type(s) */
3485         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3486                 /* we will intentionally ignore any errors from this function
3487                  *  as we will end up in *some* reset request in any case
3488                  */
3489                 hclge_handle_hw_msix_error(hdev, addr);
3490                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3491                 /* We deferred the clearing of the error event which caused
3492                  * the interrupt, since it was not possible to do that in
3493                  * interrupt context (and this is the reason we introduced the
3494                  * new UNKNOWN reset type). Now that the errors have been
3495                  * handled and cleared in hardware, we can safely enable
3496                  * interrupts. This is an exception to the norm.
3497                  */
3498                 hclge_enable_vector(&hdev->misc_vector, true);
3499         }
3500
3501         /* return the highest priority reset level amongst all */
3502         if (test_bit(HNAE3_IMP_RESET, addr)) {
3503                 rst_level = HNAE3_IMP_RESET;
3504                 clear_bit(HNAE3_IMP_RESET, addr);
3505                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506                 clear_bit(HNAE3_FUNC_RESET, addr);
3507         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3508                 rst_level = HNAE3_GLOBAL_RESET;
3509                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3510                 clear_bit(HNAE3_FUNC_RESET, addr);
3511         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3512                 rst_level = HNAE3_FUNC_RESET;
3513                 clear_bit(HNAE3_FUNC_RESET, addr);
3514         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3515                 rst_level = HNAE3_FLR_RESET;
3516                 clear_bit(HNAE3_FLR_RESET, addr);
3517         }
3518
3519         if (hdev->reset_type != HNAE3_NONE_RESET &&
3520             rst_level < hdev->reset_type)
3521                 return HNAE3_NONE_RESET;
3522
3523         return rst_level;
3524 }
3525
3526 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3527 {
3528         u32 clearval = 0;
3529
3530         switch (hdev->reset_type) {
3531         case HNAE3_IMP_RESET:
3532                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3533                 break;
3534         case HNAE3_GLOBAL_RESET:
3535                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3536                 break;
3537         default:
3538                 break;
3539         }
3540
3541         if (!clearval)
3542                 return;
3543
3544         /* For revision 0x20, the reset interrupt source
3545          * can only be cleared after the hardware reset is done
3546          */
3547         if (hdev->pdev->revision == 0x20)
3548                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3549                                 clearval);
3550
3551         hclge_enable_vector(&hdev->misc_vector, true);
3552 }
3553
3554 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3555 {
3556         int ret = 0;
3557
3558         switch (hdev->reset_type) {
3559         case HNAE3_FUNC_RESET:
3560                 /* fall through */
3561         case HNAE3_FLR_RESET:
3562                 ret = hclge_set_all_vf_rst(hdev, true);
3563                 break;
3564         default:
3565                 break;
3566         }
3567
3568         return ret;
3569 }
3570
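/* The handshake bit HCLGE_NIC_SW_RST_RDY in the NIC CSQ depth register informs
 * the hardware whether the driver has finished its pre-reset preparation; it
 * is set before the reset is triggered and cleared again after re-init.
 */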
3571 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3572 {
3573         u32 reg_val;
3574
3575         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3576         if (enable)
3577                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3578         else
3579                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3580
3581         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3582 }
3583
3584 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3585 {
3586         u32 reg_val;
3587         int ret = 0;
3588
3589         switch (hdev->reset_type) {
3590         case HNAE3_FUNC_RESET:
3591                 /* confirm whether all running VFs are ready
3592                  * before requesting the PF reset
3593                  */
3594                 ret = hclge_func_reset_sync_vf(hdev);
3595                 if (ret)
3596                         return ret;
3597
3598                 ret = hclge_func_reset_cmd(hdev, 0);
3599                 if (ret) {
3600                         dev_err(&hdev->pdev->dev,
3601                                 "asserting function reset fail %d!\n", ret);
3602                         return ret;
3603                 }
3604
3605                 /* After performing the PF reset, it is not necessary to do the
3606                  * mailbox handling or send any command to firmware, because
3607                  * any mailbox handling or command to firmware is only valid
3608                  * after hclge_cmd_init is called.
3609                  */
3610                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3611                 hdev->rst_stats.pf_rst_cnt++;
3612                 break;
3613         case HNAE3_FLR_RESET:
3614                 /* confirm whether all running VFs are ready
3615                  * before requesting the PF reset
3616                  */
3617                 ret = hclge_func_reset_sync_vf(hdev);
3618                 if (ret)
3619                         return ret;
3620
3621                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3622                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3623                 hdev->rst_stats.flr_rst_cnt++;
3624                 break;
3625         case HNAE3_IMP_RESET:
3626                 hclge_handle_imp_error(hdev);
3627                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3628                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3629                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3630                 break;
3631         default:
3632                 break;
3633         }
3634
3635         /* inform hardware that preparatory work is done */
3636         msleep(HCLGE_RESET_SYNC_TIME);
3637         hclge_reset_handshake(hdev, true);
3638         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3639
3640         return ret;
3641 }
3642
3643 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3644 {
3645 #define MAX_RESET_FAIL_CNT 5
3646
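	/* retry policy: keep waiting while a reset is still pending, give up
	 * this attempt if a new reset interrupt arrived, otherwise re-schedule
	 * the same reset up to MAX_RESET_FAIL_CNT times before declaring failure
	 */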
3647         if (hdev->reset_pending) {
3648                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3649                          hdev->reset_pending);
3650                 return true;
3651         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3652                    HCLGE_RESET_INT_M) {
3653                 dev_info(&hdev->pdev->dev,
3654                          "reset failed because new reset interrupt\n");
3655                 hclge_clear_reset_cause(hdev);
3656                 return false;
3657         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3658                 hdev->rst_stats.reset_fail_cnt++;
3659                 set_bit(hdev->reset_type, &hdev->reset_pending);
3660                 dev_info(&hdev->pdev->dev,
3661                          "re-schedule reset task(%u)\n",
3662                          hdev->rst_stats.reset_fail_cnt);
3663                 return true;
3664         }
3665
3666         hclge_clear_reset_cause(hdev);
3667
3668         /* recover the handshake status when reset fail */
3669         hclge_reset_handshake(hdev, true);
3670
3671         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3672
3673         hclge_dbg_dump_rst_info(hdev);
3674
3675         return false;
3676 }
3677
3678 static int hclge_set_rst_done(struct hclge_dev *hdev)
3679 {
3680         struct hclge_pf_rst_done_cmd *req;
3681         struct hclge_desc desc;
3682
3683         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3684         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3685         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3686
3687         return hclge_cmd_send(&hdev->hw, &desc, 1);
3688 }
3689
3690 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3691 {
3692         int ret = 0;
3693
3694         switch (hdev->reset_type) {
3695         case HNAE3_FUNC_RESET:
3696                 /* fall through */
3697         case HNAE3_FLR_RESET:
3698                 ret = hclge_set_all_vf_rst(hdev, false);
3699                 break;
3700         case HNAE3_GLOBAL_RESET:
3701                 /* fall through */
3702         case HNAE3_IMP_RESET:
3703                 ret = hclge_set_rst_done(hdev);
3704                 break;
3705         default:
3706                 break;
3707         }
3708
3709         /* clear the handshake status after re-initialization is done */
3710         hclge_reset_handshake(hdev, false);
3711
3712         return ret;
3713 }
3714
3715 static int hclge_reset_stack(struct hclge_dev *hdev)
3716 {
3717         int ret;
3718
3719         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3720         if (ret)
3721                 return ret;
3722
3723         ret = hclge_reset_ae_dev(hdev->ae_dev);
3724         if (ret)
3725                 return ret;
3726
3727         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3728         if (ret)
3729                 return ret;
3730
3731         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3732 }
3733
3734 static void hclge_reset(struct hclge_dev *hdev)
3735 {
3736         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3737         enum hnae3_reset_type reset_level;
3738         int ret;
3739
3740         /* Initialize ae_dev reset status as well, in case enet layer wants to
3741          * know if device is undergoing reset
3742          */
3743         ae_dev->reset_type = hdev->reset_type;
3744         hdev->rst_stats.reset_cnt++;
3745         /* perform reset of the stack & ae device for a client */
3746         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3747         if (ret)
3748                 goto err_reset;
3749
3750         ret = hclge_reset_prepare_down(hdev);
3751         if (ret)
3752                 goto err_reset;
3753
3754         rtnl_lock();
3755         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3756         if (ret)
3757                 goto err_reset_lock;
3758
3759         rtnl_unlock();
3760
3761         ret = hclge_reset_prepare_wait(hdev);
3762         if (ret)
3763                 goto err_reset;
3764
3765         if (hclge_reset_wait(hdev))
3766                 goto err_reset;
3767
3768         hdev->rst_stats.hw_reset_done_cnt++;
3769
3770         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3771         if (ret)
3772                 goto err_reset;
3773
3774         rtnl_lock();
3775
3776         ret = hclge_reset_stack(hdev);
3777         if (ret)
3778                 goto err_reset_lock;
3779
3780         hclge_clear_reset_cause(hdev);
3781
3782         ret = hclge_reset_prepare_up(hdev);
3783         if (ret)
3784                 goto err_reset_lock;
3785
3786         rtnl_unlock();
3787
3788         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3789         /* ignore the RoCE notify error if it has already failed
3790          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3791          */
3792         if (ret &&
3793             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3794                 goto err_reset;
3795
3796         rtnl_lock();
3797
3798         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3799         if (ret)
3800                 goto err_reset_lock;
3801
3802         rtnl_unlock();
3803
3804         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3805         if (ret)
3806                 goto err_reset;
3807
3808         hdev->last_reset_time = jiffies;
3809         hdev->rst_stats.reset_fail_cnt = 0;
3810         hdev->rst_stats.reset_done_cnt++;
3811         ae_dev->reset_type = HNAE3_NONE_RESET;
3812
3813         /* if default_reset_request has a higher level reset request,
3814          * it should be handled as soon as possible, since some errors
3815          * need this kind of reset to be fixed.
3816          */
3817         reset_level = hclge_get_reset_level(ae_dev,
3818                                             &hdev->default_reset_request);
3819         if (reset_level != HNAE3_NONE_RESET)
3820                 set_bit(reset_level, &hdev->reset_request);
3821
3822         return;
3823
3824 err_reset_lock:
3825         rtnl_unlock();
3826 err_reset:
3827         if (hclge_reset_err_handle(hdev))
3828                 hclge_reset_task_schedule(hdev);
3829 }
3830
3831 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3832 {
3833         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3834         struct hclge_dev *hdev = ae_dev->priv;
3835
3836         /* We might end up getting called broadly because of 2 cases below:
3837          * 1. A recoverable error was conveyed through APEI and the only way
3838          *    to bring back normalcy is to reset.
3839          * 2. A new reset request from the stack due to timeout
3840          *
3841          * For the first case, the error event might not have an ae handle
3842          * available. Check if this is a new reset request and we are not here
3843          * just because the last reset attempt did not succeed and the watchdog
3844          * hit us again. We know this if the last reset request did not occur
3845          * very recently (watchdog timer = 5*HZ; check after a sufficiently
3846          * large time, say 4*5*HZ). For a new request we reset the "reset
3847          * level" to PF reset. If it is a repeat of the most recent request,
3848          * throttle it: do not allow it again before HCLGE_RESET_INTERVAL.
3849          */
3850          */
3851         if (!handle)
3852                 handle = &hdev->vport[0].nic;
3853
3854         if (time_before(jiffies, (hdev->last_reset_time +
3855                                   HCLGE_RESET_INTERVAL))) {
3856                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3857                 return;
3858         } else if (hdev->default_reset_request) {
3859                 hdev->reset_level =
3860                         hclge_get_reset_level(ae_dev,
3861                                               &hdev->default_reset_request);
3862         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3863                 hdev->reset_level = HNAE3_FUNC_RESET;
3864         }
3865
3866         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3867                  hdev->reset_level);
3868
3869         /* request reset & schedule reset task */
3870         set_bit(hdev->reset_level, &hdev->reset_request);
3871         hclge_reset_task_schedule(hdev);
3872
3873         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3874                 hdev->reset_level++;
3875 }
3876
3877 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3878                                         enum hnae3_reset_type rst_type)
3879 {
3880         struct hclge_dev *hdev = ae_dev->priv;
3881
3882         set_bit(rst_type, &hdev->default_reset_request);
3883 }
3884
3885 static void hclge_reset_timer(struct timer_list *t)
3886 {
3887         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3888
3889         /* if default_reset_request has no value, it means that this reset
3890          * request has already been handled, so just return here
3891          */
3892         if (!hdev->default_reset_request)
3893                 return;
3894
3895         dev_info(&hdev->pdev->dev,
3896                  "triggering reset in reset timer\n");
3897         hclge_reset_event(hdev->pdev, NULL);
3898 }
3899
3900 static void hclge_reset_subtask(struct hclge_dev *hdev)
3901 {
3902         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3903
3904         /* check if there is any ongoing reset in the hardware. This status
3905          * can be checked from reset_pending. If there is, we need to wait
3906          * for the hardware to complete the reset.
3907          *    a. If we are able to figure out in reasonable time that the
3908          *       hardware has fully reset, we can proceed with the driver and
3909          *       client reset.
3910          *    b. else, we can come back later to check this status, so
3911          *       reschedule now.
3912          */
3913         hdev->last_reset_time = jiffies;
3914         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3915         if (hdev->reset_type != HNAE3_NONE_RESET)
3916                 hclge_reset(hdev);
3917
3918         /* check if we got any *new* reset requests to be honored */
3919         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3920         if (hdev->reset_type != HNAE3_NONE_RESET)
3921                 hclge_do_reset(hdev);
3922
3923         hdev->reset_type = HNAE3_NONE_RESET;
3924 }
3925
3926 static void hclge_reset_service_task(struct work_struct *work)
3927 {
3928         struct hclge_dev *hdev =
3929                 container_of(work, struct hclge_dev, rst_service_task);
3930
3931         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3932                 return;
3933
3934         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3935
3936         hclge_reset_subtask(hdev);
3937
3938         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3939 }
3940
3941 static void hclge_mailbox_service_task(struct work_struct *work)
3942 {
3943         struct hclge_dev *hdev =
3944                 container_of(work, struct hclge_dev, mbx_service_task);
3945
3946         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3947                 return;
3948
3949         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3950
3951         hclge_mbx_handler(hdev);
3952
3953         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3954 }
3955
3956 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3957 {
3958         int i;
3959
3960         /* start from vport 1, since the PF (vport 0) is always alive */
3961         for (i = 1; i < hdev->num_alloc_vport; i++) {
3962                 struct hclge_vport *vport = &hdev->vport[i];
3963
3964                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3965                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3966
3967                 /* If vf is not alive, set to default value */
3968                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3969                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3970         }
3971 }
3972
3973 static void hclge_service_task(struct work_struct *work)
3974 {
3975         struct hclge_dev *hdev =
3976                 container_of(work, struct hclge_dev, service_task.work);
3977
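	/* this delayed work re-arms itself via hclge_task_schedule() below, so
	 * stats, port/link, VF-alive, VLAN and aRFS housekeeping run roughly
	 * once per second
	 */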
3978         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3979
3980         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3981                 hclge_update_stats_for_all(hdev);
3982                 hdev->hw_stats.stats_timer = 0;
3983         }
3984
3985         hclge_update_port_info(hdev);
3986         hclge_update_link_status(hdev);
3987         hclge_update_vport_alive(hdev);
3988         hclge_sync_vlan_filter(hdev);
3989
3990         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3991                 hclge_rfs_filter_expire(hdev);
3992                 hdev->fd_arfs_expire_timer = 0;
3993         }
3994
3995         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3996 }
3997
3998 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3999 {
4000         /* VF handle has no client */
4001         if (!handle->client)
4002                 return container_of(handle, struct hclge_vport, nic);
4003         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4004                 return container_of(handle, struct hclge_vport, roce);
4005         else
4006                 return container_of(handle, struct hclge_vport, nic);
4007 }
4008
4009 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4010                             struct hnae3_vector_info *vector_info)
4011 {
4012         struct hclge_vport *vport = hclge_get_vport(handle);
4013         struct hnae3_vector_info *vector = vector_info;
4014         struct hclge_dev *hdev = vport->back;
4015         int alloc = 0;
4016         int i, j;
4017
4018         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4019         vector_num = min(hdev->num_msi_left, vector_num);
4020
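	/* vector 0 is reserved for the misc interrupt, so start searching from
	 * index 1; each free slot is bound to this vport and its IRQ number
	 * and per-vport I/O address are reported back to the caller
	 */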
4021         for (j = 0; j < vector_num; j++) {
4022                 for (i = 1; i < hdev->num_msi; i++) {
4023                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4024                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4025                                 vector->io_addr = hdev->hw.io_base +
4026                                         HCLGE_VECTOR_REG_BASE +
4027                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4028                                         vport->vport_id *
4029                                         HCLGE_VECTOR_VF_OFFSET;
4030                                 hdev->vector_status[i] = vport->vport_id;
4031                                 hdev->vector_irq[i] = vector->vector;
4032
4033                                 vector++;
4034                                 alloc++;
4035
4036                                 break;
4037                         }
4038                 }
4039         }
4040         hdev->num_msi_left -= alloc;
4041         hdev->num_msi_used += alloc;
4042
4043         return alloc;
4044 }
4045
4046 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4047 {
4048         int i;
4049
4050         for (i = 0; i < hdev->num_msi; i++)
4051                 if (vector == hdev->vector_irq[i])
4052                         return i;
4053
4054         return -EINVAL;
4055 }
4056
4057 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4058 {
4059         struct hclge_vport *vport = hclge_get_vport(handle);
4060         struct hclge_dev *hdev = vport->back;
4061         int vector_id;
4062
4063         vector_id = hclge_get_vector_index(hdev, vector);
4064         if (vector_id < 0) {
4065                 dev_err(&hdev->pdev->dev,
4066                         "Get vector index fail. vector_id =%d\n", vector_id);
4067                 return vector_id;
4068         }
4069
4070         hclge_free_vector(hdev, vector_id);
4071
4072         return 0;
4073 }
4074
4075 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4076 {
4077         return HCLGE_RSS_KEY_SIZE;
4078 }
4079
4080 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4081 {
4082         return HCLGE_RSS_IND_TBL_SIZE;
4083 }
4084
4085 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4086                                   const u8 hfunc, const u8 *key)
4087 {
4088         struct hclge_rss_config_cmd *req;
4089         unsigned int key_offset = 0;
4090         struct hclge_desc desc;
4091         int key_counts;
4092         int key_size;
4093         int ret;
4094
4095         key_counts = HCLGE_RSS_KEY_SIZE;
4096         req = (struct hclge_rss_config_cmd *)desc.data;
4097
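	/* the hash key is programmed in chunks of HCLGE_RSS_HASH_KEY_NUM bytes,
	 * one command descriptor per chunk, with key_offset selecting the chunk
	 */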
4098         while (key_counts) {
4099                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4100                                            false);
4101
4102                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4103                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4104
4105                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4106                 memcpy(req->hash_key,
4107                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4108
4109                 key_counts -= key_size;
4110                 key_offset++;
4111                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4112                 if (ret) {
4113                         dev_err(&hdev->pdev->dev,
4114                                 "Configure RSS config fail, status = %d\n",
4115                                 ret);
4116                         return ret;
4117                 }
4118         }
4119         return 0;
4120 }
4121
4122 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4123 {
4124         struct hclge_rss_indirection_table_cmd *req;
4125         struct hclge_desc desc;
4126         int i, j;
4127         int ret;
4128
4129         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4130
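	/* the indirection table is written HCLGE_RSS_CFG_TBL_SIZE entries per
	 * descriptor, across HCLGE_RSS_CFG_TBL_NUM separate commands
	 */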
4131         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4132                 hclge_cmd_setup_basic_desc
4133                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4134
4135                 req->start_table_index =
4136                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4137                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4138
4139                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4140                         req->rss_result[j] =
4141                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4142
4143                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4144                 if (ret) {
4145                         dev_err(&hdev->pdev->dev,
4146                                 "Configure rss indir table fail,status = %d\n",
4147                                 ret);
4148                         return ret;
4149                 }
4150         }
4151         return 0;
4152 }
4153
4154 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4155                                  u16 *tc_size, u16 *tc_offset)
4156 {
4157         struct hclge_rss_tc_mode_cmd *req;
4158         struct hclge_desc desc;
4159         int ret;
4160         int i;
4161
4162         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4163         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4164
4165         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4166                 u16 mode = 0;
4167
4168                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4169                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4170                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4171                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4172                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4173
4174                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4175         }
4176
4177         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4178         if (ret)
4179                 dev_err(&hdev->pdev->dev,
4180                         "Configure rss tc mode fail, status = %d\n", ret);
4181
4182         return ret;
4183 }
4184
4185 static void hclge_get_rss_type(struct hclge_vport *vport)
4186 {
4187         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4188             vport->rss_tuple_sets.ipv4_udp_en ||
4189             vport->rss_tuple_sets.ipv4_sctp_en ||
4190             vport->rss_tuple_sets.ipv6_tcp_en ||
4191             vport->rss_tuple_sets.ipv6_udp_en ||
4192             vport->rss_tuple_sets.ipv6_sctp_en)
4193                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4194         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4195                  vport->rss_tuple_sets.ipv6_fragment_en)
4196                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4197         else
4198                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4199 }
4200
4201 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4202 {
4203         struct hclge_rss_input_tuple_cmd *req;
4204         struct hclge_desc desc;
4205         int ret;
4206
4207         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4208
4209         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4210
4211         /* Get the tuple cfg from pf */
4212         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4213         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4214         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4215         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4216         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4217         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4218         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4219         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4220         hclge_get_rss_type(&hdev->vport[0]);
4221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4222         if (ret)
4223                 dev_err(&hdev->pdev->dev,
4224                         "Configure rss input fail, status = %d\n", ret);
4225         return ret;
4226 }
4227
4228 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4229                          u8 *key, u8 *hfunc)
4230 {
4231         struct hclge_vport *vport = hclge_get_vport(handle);
4232         int i;
4233
4234         /* Get hash algorithm */
4235         if (hfunc) {
4236                 switch (vport->rss_algo) {
4237                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4238                         *hfunc = ETH_RSS_HASH_TOP;
4239                         break;
4240                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4241                         *hfunc = ETH_RSS_HASH_XOR;
4242                         break;
4243                 default:
4244                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4245                         break;
4246                 }
4247         }
4248
4249         /* Get the RSS Key required by the user */
4250         if (key)
4251                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4252
4253         /* Get indirect table */
4254         if (indir)
4255                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4256                         indir[i] = vport->rss_indirection_tbl[i];
4257
4258         return 0;
4259 }
4260
4261 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4262                          const  u8 *key, const  u8 hfunc)
4263 {
4264         struct hclge_vport *vport = hclge_get_vport(handle);
4265         struct hclge_dev *hdev = vport->back;
4266         u8 hash_algo;
4267         int ret, i;
4268
4269         /* Set the RSS Hash Key if specified by the user */
4270         if (key) {
4271                 switch (hfunc) {
4272                 case ETH_RSS_HASH_TOP:
4273                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4274                         break;
4275                 case ETH_RSS_HASH_XOR:
4276                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4277                         break;
4278                 case ETH_RSS_HASH_NO_CHANGE:
4279                         hash_algo = vport->rss_algo;
4280                         break;
4281                 default:
4282                         return -EINVAL;
4283                 }
4284
4285                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4286                 if (ret)
4287                         return ret;
4288
4289                 /* Update the shadow RSS key with the user specified key */
4290                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4291                 vport->rss_algo = hash_algo;
4292         }
4293
4294         /* Update the shadow RSS table with user specified qids */
4295         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4296                 vport->rss_indirection_tbl[i] = indir[i];
4297
4298         /* Update the hardware */
4299         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4300 }
4301
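/* Translate the ethtool RXH_* flags in nfc->data into the driver's tuple
 * bits (source/destination port and IP); SCTP flows additionally hash on
 * the SCTP verification tag (HCLGE_V_TAG_BIT).
 */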
4302 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4303 {
4304         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4305
4306         if (nfc->data & RXH_L4_B_2_3)
4307                 hash_sets |= HCLGE_D_PORT_BIT;
4308         else
4309                 hash_sets &= ~HCLGE_D_PORT_BIT;
4310
4311         if (nfc->data & RXH_IP_SRC)
4312                 hash_sets |= HCLGE_S_IP_BIT;
4313         else
4314                 hash_sets &= ~HCLGE_S_IP_BIT;
4315
4316         if (nfc->data & RXH_IP_DST)
4317                 hash_sets |= HCLGE_D_IP_BIT;
4318         else
4319                 hash_sets &= ~HCLGE_D_IP_BIT;
4320
4321         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4322                 hash_sets |= HCLGE_V_TAG_BIT;
4323
4324         return hash_sets;
4325 }
4326
4327 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4328                                struct ethtool_rxnfc *nfc)
4329 {
4330         struct hclge_vport *vport = hclge_get_vport(handle);
4331         struct hclge_dev *hdev = vport->back;
4332         struct hclge_rss_input_tuple_cmd *req;
4333         struct hclge_desc desc;
4334         u8 tuple_sets;
4335         int ret;
4336
4337         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4338                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4339                 return -EINVAL;
4340
4341         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4342         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4343
4344         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4345         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4346         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4347         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4348         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4349         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4350         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4351         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4352
4353         tuple_sets = hclge_get_rss_hash_bits(nfc);
4354         switch (nfc->flow_type) {
4355         case TCP_V4_FLOW:
4356                 req->ipv4_tcp_en = tuple_sets;
4357                 break;
4358         case TCP_V6_FLOW:
4359                 req->ipv6_tcp_en = tuple_sets;
4360                 break;
4361         case UDP_V4_FLOW:
4362                 req->ipv4_udp_en = tuple_sets;
4363                 break;
4364         case UDP_V6_FLOW:
4365                 req->ipv6_udp_en = tuple_sets;
4366                 break;
4367         case SCTP_V4_FLOW:
4368                 req->ipv4_sctp_en = tuple_sets;
4369                 break;
4370         case SCTP_V6_FLOW:
4371                 if ((nfc->data & RXH_L4_B_0_1) ||
4372                     (nfc->data & RXH_L4_B_2_3))
4373                         return -EINVAL;
4374
4375                 req->ipv6_sctp_en = tuple_sets;
4376                 break;
4377         case IPV4_FLOW:
4378                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4379                 break;
4380         case IPV6_FLOW:
4381                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4382                 break;
4383         default:
4384                 return -EINVAL;
4385         }
4386
4387         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4388         if (ret) {
4389                 dev_err(&hdev->pdev->dev,
4390                         "Set rss tuple fail, status = %d\n", ret);
4391                 return ret;
4392         }
4393
4394         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4395         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4396         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4397         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4398         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4399         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4400         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4401         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4402         hclge_get_rss_type(vport);
4403         return 0;
4404 }
4405
4406 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4407                                struct ethtool_rxnfc *nfc)
4408 {
4409         struct hclge_vport *vport = hclge_get_vport(handle);
4410         u8 tuple_sets;
4411
4412         nfc->data = 0;
4413
4414         switch (nfc->flow_type) {
4415         case TCP_V4_FLOW:
4416                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4417                 break;
4418         case UDP_V4_FLOW:
4419                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4420                 break;
4421         case TCP_V6_FLOW:
4422                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4423                 break;
4424         case UDP_V6_FLOW:
4425                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4426                 break;
4427         case SCTP_V4_FLOW:
4428                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4429                 break;
4430         case SCTP_V6_FLOW:
4431                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4432                 break;
4433         case IPV4_FLOW:
4434         case IPV6_FLOW:
4435                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4436                 break;
4437         default:
4438                 return -EINVAL;
4439         }
4440
4441         if (!tuple_sets)
4442                 return 0;
4443
4444         if (tuple_sets & HCLGE_D_PORT_BIT)
4445                 nfc->data |= RXH_L4_B_2_3;
4446         if (tuple_sets & HCLGE_S_PORT_BIT)
4447                 nfc->data |= RXH_L4_B_0_1;
4448         if (tuple_sets & HCLGE_D_IP_BIT)
4449                 nfc->data |= RXH_IP_DST;
4450         if (tuple_sets & HCLGE_S_IP_BIT)
4451                 nfc->data |= RXH_IP_SRC;
4452
4453         return 0;
4454 }
4455
4456 static int hclge_get_tc_size(struct hnae3_handle *handle)
4457 {
4458         struct hclge_vport *vport = hclge_get_vport(handle);
4459         struct hclge_dev *hdev = vport->back;
4460
4461         return hdev->rss_size_max;
4462 }
4463
4464 int hclge_rss_init_hw(struct hclge_dev *hdev)
4465 {
4466         struct hclge_vport *vport = hdev->vport;
4467         u8 *rss_indir = vport[0].rss_indirection_tbl;
4468         u16 rss_size = vport[0].alloc_rss_size;
4469         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4470         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4471         u8 *key = vport[0].rss_hash_key;
4472         u8 hfunc = vport[0].rss_algo;
4473         u16 tc_valid[HCLGE_MAX_TC_NUM];
4474         u16 roundup_size;
4475         unsigned int i;
4476         int ret;
4477
4478         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4479         if (ret)
4480                 return ret;
4481
4482         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4483         if (ret)
4484                 return ret;
4485
4486         ret = hclge_set_rss_input_tuple(hdev);
4487         if (ret)
4488                 return ret;
4489
4490         /* Each TC has the same queue size, and the tc_size set to hardware is
4491          * the log2 of the roundup power of two of rss_size; the actual queue
4492          * size is limited by the indirection table.
4493          */
4494         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4495                 dev_err(&hdev->pdev->dev,
4496                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4497                         rss_size);
4498                 return -EINVAL;
4499         }
4500
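        /* e.g. rss_size = 10: roundup_pow_of_two(10) = 16 and ilog2(16) = 4,
         * so a tc_size of 4 is written to hardware below.
         */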
4501         roundup_size = roundup_pow_of_two(rss_size);
4502         roundup_size = ilog2(roundup_size);
4503
4504         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4505                 tc_valid[i] = 0;
4506
4507                 if (!(hdev->hw_tc_map & BIT(i)))
4508                         continue;
4509
4510                 tc_valid[i] = 1;
4511                 tc_size[i] = roundup_size;
4512                 tc_offset[i] = rss_size * i;
4513         }
4514
4515         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4516 }
4517
4518 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4519 {
4520         struct hclge_vport *vport = hdev->vport;
4521         int i, j;
4522
4523         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4524                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4525                         vport[j].rss_indirection_tbl[i] =
4526                                 i % vport[j].alloc_rss_size;
4527         }
4528 }
4529
4530 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4531 {
4532         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4533         struct hclge_vport *vport = hdev->vport;
4534
4535         if (hdev->pdev->revision >= 0x21)
4536                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4537
4538         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4539                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4540                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4541                 vport[i].rss_tuple_sets.ipv4_udp_en =
4542                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4543                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4544                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4545                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4546                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4547                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4548                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4549                 vport[i].rss_tuple_sets.ipv6_udp_en =
4550                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4551                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4552                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4553                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4554                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4555
4556                 vport[i].rss_algo = rss_algo;
4557
4558                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4559                        HCLGE_RSS_KEY_SIZE);
4560         }
4561
4562         hclge_rss_indir_init_cfg(hdev);
4563 }
4564
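/* Map (en = true) or unmap (en = false) a chain of rings to/from an
 * interrupt vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are carried in
 * one command descriptor; longer chains are sent in multiple commands.
 */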
4565 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4566                                 int vector_id, bool en,
4567                                 struct hnae3_ring_chain_node *ring_chain)
4568 {
4569         struct hclge_dev *hdev = vport->back;
4570         struct hnae3_ring_chain_node *node;
4571         struct hclge_desc desc;
4572         struct hclge_ctrl_vector_chain_cmd *req =
4573                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4574         enum hclge_cmd_status status;
4575         enum hclge_opcode_type op;
4576         u16 tqp_type_and_id;
4577         int i;
4578
4579         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4580         hclge_cmd_setup_basic_desc(&desc, op, false);
4581         req->int_vector_id = vector_id;
4582
4583         i = 0;
4584         for (node = ring_chain; node; node = node->next) {
4585                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4586                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4587                                 HCLGE_INT_TYPE_S,
4588                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4589                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4590                                 HCLGE_TQP_ID_S, node->tqp_index);
4591                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4592                                 HCLGE_INT_GL_IDX_S,
4593                                 hnae3_get_field(node->int_gl_idx,
4594                                                 HNAE3_RING_GL_IDX_M,
4595                                                 HNAE3_RING_GL_IDX_S));
4596                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4597                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4598                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4599                         req->vfid = vport->vport_id;
4600
4601                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4602                         if (status) {
4603                                 dev_err(&hdev->pdev->dev,
4604                                         "Map TQP fail, status is %d.\n",
4605                                         status);
4606                                 return -EIO;
4607                         }
4608                         i = 0;
4609
4610                         hclge_cmd_setup_basic_desc(&desc, op, false);
4613                         req->int_vector_id = vector_id;
4614                 }
4615         }
4616
4617         if (i > 0) {
4618                 req->int_cause_num = i;
4619                 req->vfid = vport->vport_id;
4620                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4621                 if (status) {
4622                         dev_err(&hdev->pdev->dev,
4623                                 "Map TQP fail, status is %d.\n", status);
4624                         return -EIO;
4625                 }
4626         }
4627
4628         return 0;
4629 }
4630
4631 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4632                                     struct hnae3_ring_chain_node *ring_chain)
4633 {
4634         struct hclge_vport *vport = hclge_get_vport(handle);
4635         struct hclge_dev *hdev = vport->back;
4636         int vector_id;
4637
4638         vector_id = hclge_get_vector_index(hdev, vector);
4639         if (vector_id < 0) {
4640                 dev_err(&hdev->pdev->dev,
4641                         "Get vector index fail. vector_id =%d\n", vector_id);
4642                 return vector_id;
4643         }
4644
4645         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4646 }
4647
4648 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4649                                        struct hnae3_ring_chain_node *ring_chain)
4650 {
4651         struct hclge_vport *vport = hclge_get_vport(handle);
4652         struct hclge_dev *hdev = vport->back;
4653         int vector_id, ret;
4654
4655         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4656                 return 0;
4657
4658         vector_id = hclge_get_vector_index(hdev, vector);
4659         if (vector_id < 0) {
4660                 dev_err(&handle->pdev->dev,
4661                         "Get vector index fail. ret =%d\n", vector_id);
4662                 return vector_id;
4663         }
4664
4665         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4666         if (ret)
4667                 dev_err(&handle->pdev->dev,
4668                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4669                         vector_id, ret);
4670
4671         return ret;
4672 }
4673
4674 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4675                                       struct hclge_promisc_param *param)
4676 {
4677         struct hclge_promisc_cfg_cmd *req;
4678         struct hclge_desc desc;
4679         int ret;
4680
4681         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4682
4683         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4684         req->vf_id = param->vf_id;
4685
4686         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4687          * pdev revision(0x20); newer revisions support them. Setting these
4688          * two fields does not cause an error when the driver sends the
4689          * command to the firmware on revision(0x20).
4690          */
4691         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4692                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4693
4694         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4695         if (ret)
4696                 dev_err(&hdev->pdev->dev,
4697                         "Set promisc mode fail, status is %d.\n", ret);
4698
4699         return ret;
4700 }
4701
4702 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4703                                      bool en_uc, bool en_mc, bool en_bc,
4704                                      int vport_id)
4705 {
4706         if (!param)
4707                 return;
4708
4709         memset(param, 0, sizeof(struct hclge_promisc_param));
4710         if (en_uc)
4711                 param->enable = HCLGE_PROMISC_EN_UC;
4712         if (en_mc)
4713                 param->enable |= HCLGE_PROMISC_EN_MC;
4714         if (en_bc)
4715                 param->enable |= HCLGE_PROMISC_EN_BC;
4716         param->vf_id = vport_id;
4717 }
4718
4719 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4720                                  bool en_mc_pmc, bool en_bc_pmc)
4721 {
4722         struct hclge_dev *hdev = vport->back;
4723         struct hclge_promisc_param param;
4724
4725         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4726                                  vport->vport_id);
4727         return hclge_cmd_set_promisc_mode(hdev, &param);
4728 }
4729
4730 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4731                                   bool en_mc_pmc)
4732 {
4733         struct hclge_vport *vport = hclge_get_vport(handle);
4734         bool en_bc_pmc = true;
4735
4736         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4737          * is always bypassed. So broadcast promisc should be disabled until
4738          * the user enables promisc mode.
4739          */
4740         if (handle->pdev->revision == 0x20)
4741                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4742
4743         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4744                                             en_bc_pmc);
4745 }
4746
4747 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4748 {
4749         struct hclge_get_fd_mode_cmd *req;
4750         struct hclge_desc desc;
4751         int ret;
4752
4753         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4754
4755         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4756
4757         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4758         if (ret) {
4759                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4760                 return ret;
4761         }
4762
4763         *fd_mode = req->mode;
4764
4765         return ret;
4766 }
4767
4768 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4769                                    u32 *stage1_entry_num,
4770                                    u32 *stage2_entry_num,
4771                                    u16 *stage1_counter_num,
4772                                    u16 *stage2_counter_num)
4773 {
4774         struct hclge_get_fd_allocation_cmd *req;
4775         struct hclge_desc desc;
4776         int ret;
4777
4778         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4779
4780         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4781
4782         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4783         if (ret) {
4784                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4785                         ret);
4786                 return ret;
4787         }
4788
4789         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4790         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4791         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4792         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4793
4794         return ret;
4795 }
4796
4797 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4798 {
4799         struct hclge_set_fd_key_config_cmd *req;
4800         struct hclge_fd_key_cfg *stage;
4801         struct hclge_desc desc;
4802         int ret;
4803
4804         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4805
4806         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4807         stage = &hdev->fd_cfg.key_cfg[stage_num];
4808         req->stage = stage_num;
4809         req->key_select = stage->key_sel;
4810         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4811         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4812         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4813         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4814         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4815         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4816
4817         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4818         if (ret)
4819                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4820
4821         return ret;
4822 }
4823
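/* Query the flow director mode and entry/counter allocation from firmware,
 * then set up the stage 1 key configuration: which inner tuples and which
 * meta data fields take part in the TCAM key.
 */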
4824 static int hclge_init_fd_config(struct hclge_dev *hdev)
4825 {
4826 #define LOW_2_WORDS             0x03
4827         struct hclge_fd_key_cfg *key_cfg;
4828         int ret;
4829
4830         if (!hnae3_dev_fd_supported(hdev))
4831                 return 0;
4832
4833         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4834         if (ret)
4835                 return ret;
4836
4837         switch (hdev->fd_cfg.fd_mode) {
4838         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4839                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4840                 break;
4841         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4842                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4843                 break;
4844         default:
4845                 dev_err(&hdev->pdev->dev,
4846                         "Unsupported flow director mode %u\n",
4847                         hdev->fd_cfg.fd_mode);
4848                 return -EOPNOTSUPP;
4849         }
4850
4851         hdev->fd_cfg.proto_support =
4852                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4853                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4854         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4855         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4856         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4857         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4858         key_cfg->outer_sipv6_word_en = 0;
4859         key_cfg->outer_dipv6_word_en = 0;
4860
4861         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4862                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4863                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4864                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4865
4866         /* If the max 400-bit key is used, we can support tuples for ether type */
4867         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4868                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4869                 key_cfg->tuple_active |=
4870                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4871         }
4872
4873         /* roce_type is used to filter roce frames
4874          * dst_vport is used to specify the rule
4875          */
4876         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4877
4878         ret = hclge_get_fd_allocation(hdev,
4879                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4880                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4881                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4882                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4883         if (ret)
4884                 return ret;
4885
4886         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4887 }
4888
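/* Program one flow director TCAM entry. The key is split across the
 * tcam_data areas of three chained descriptors; sel_x selects whether the x
 * or y half of the key is written, and is_add sets the entry valid bit when
 * writing the x half.
 */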
4889 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4890                                 int loc, u8 *key, bool is_add)
4891 {
4892         struct hclge_fd_tcam_config_1_cmd *req1;
4893         struct hclge_fd_tcam_config_2_cmd *req2;
4894         struct hclge_fd_tcam_config_3_cmd *req3;
4895         struct hclge_desc desc[3];
4896         int ret;
4897
4898         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4899         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4900         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4901         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4902         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4903
4904         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4905         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4906         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4907
4908         req1->stage = stage;
4909         req1->xy_sel = sel_x ? 1 : 0;
4910         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4911         req1->index = cpu_to_le32(loc);
4912         req1->entry_vld = sel_x ? is_add : 0;
4913
4914         if (key) {
4915                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4916                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4917                        sizeof(req2->tcam_data));
4918                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4919                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4920         }
4921
4922         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4923         if (ret)
4924                 dev_err(&hdev->pdev->dev,
4925                         "config tcam key fail, ret=%d\n",
4926                         ret);
4927
4928         return ret;
4929 }
4930
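/* Write the action data (ad) for a rule: drop or forward to a direct queue,
 * optional counter, next-stage usage, and whether the rule id is written
 * back in the buffer descriptor (write_rule_id_to_bd).
 */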
4931 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4932                               struct hclge_fd_ad_data *action)
4933 {
4934         struct hclge_fd_ad_config_cmd *req;
4935         struct hclge_desc desc;
4936         u64 ad_data = 0;
4937         int ret;
4938
4939         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4940
4941         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4942         req->index = cpu_to_le32(loc);
4943         req->stage = stage;
4944
4945         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4946                       action->write_rule_id_to_bd);
4947         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4948                         action->rule_id);
4949         ad_data <<= 32;
4950         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4951         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4952                       action->forward_to_direct_queue);
4953         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4954                         action->queue_id);
4955         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4956         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4957                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4958         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4959         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4960                         action->counter_id);
4961
4962         req->ad_data = cpu_to_le64(ad_data);
4963         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4964         if (ret)
4965                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4966
4967         return ret;
4968 }
4969
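/* Convert one tuple of the rule into its x/y TCAM key representation using
 * calc_x()/calc_y(). Returns true when the tuple occupies space in the key
 * (the caller then advances the key cursors), false otherwise.
 */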
4970 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4971                                    struct hclge_fd_rule *rule)
4972 {
4973         u16 tmp_x_s, tmp_y_s;
4974         u32 tmp_x_l, tmp_y_l;
4975         int i;
4976
4977         if (rule->unused_tuple & tuple_bit)
4978                 return true;
4979
4980         switch (tuple_bit) {
4981         case 0:
4982                 return false;
4983         case BIT(INNER_DST_MAC):
4984                 for (i = 0; i < ETH_ALEN; i++) {
4985                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4986                                rule->tuples_mask.dst_mac[i]);
4987                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4988                                rule->tuples_mask.dst_mac[i]);
4989                 }
4990
4991                 return true;
4992         case BIT(INNER_SRC_MAC):
4993                 for (i = 0; i < ETH_ALEN; i++) {
4994                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4995                                rule->tuples_mask.src_mac[i]);
4996                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4997                                rule->tuples_mask.src_mac[i]);
4998                 }
4999
5000                 return true;
5001         case BIT(INNER_VLAN_TAG_FST):
5002                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5003                        rule->tuples_mask.vlan_tag1);
5004                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5005                        rule->tuples_mask.vlan_tag1);
5006                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5007                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5008
5009                 return true;
5010         case BIT(INNER_ETH_TYPE):
5011                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5012                        rule->tuples_mask.ether_proto);
5013                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5014                        rule->tuples_mask.ether_proto);
5015                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5016                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5017
5018                 return true;
5019         case BIT(INNER_IP_TOS):
5020                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5021                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5022
5023                 return true;
5024         case BIT(INNER_IP_PROTO):
5025                 calc_x(*key_x, rule->tuples.ip_proto,
5026                        rule->tuples_mask.ip_proto);
5027                 calc_y(*key_y, rule->tuples.ip_proto,
5028                        rule->tuples_mask.ip_proto);
5029
5030                 return true;
5031         case BIT(INNER_SRC_IP):
5032                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5033                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5034                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5035                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5036                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5037                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5038
5039                 return true;
5040         case BIT(INNER_DST_IP):
5041                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5042                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5043                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5044                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5045                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5046                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5047
5048                 return true;
5049         case BIT(INNER_SRC_PORT):
5050                 calc_x(tmp_x_s, rule->tuples.src_port,
5051                        rule->tuples_mask.src_port);
5052                 calc_y(tmp_y_s, rule->tuples.src_port,
5053                        rule->tuples_mask.src_port);
5054                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5055                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5056
5057                 return true;
5058         case BIT(INNER_DST_PORT):
5059                 calc_x(tmp_x_s, rule->tuples.dst_port,
5060                        rule->tuples_mask.dst_port);
5061                 calc_y(tmp_y_s, rule->tuples.dst_port,
5062                        rule->tuples_mask.dst_port);
5063                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5064                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5065
5066                 return true;
5067         default:
5068                 return false;
5069         }
5070 }
5071
5072 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5073                                  u8 vf_id, u8 network_port_id)
5074 {
5075         u32 port_number = 0;
5076
5077         if (port_type == HOST_PORT) {
5078                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5079                                 pf_id);
5080                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5081                                 vf_id);
5082                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5083         } else {
5084                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5085                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5086                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5087         }
5088
5089         return port_number;
5090 }
5091
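/* Pack the active meta data fields (packet type for ROCE_TYPE, port number
 * for DST_VPORT) into the meta data key, aligned towards the MSB of the
 * 32-bit meta data word.
 */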
5092 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5093                                        __le32 *key_x, __le32 *key_y,
5094                                        struct hclge_fd_rule *rule)
5095 {
5096         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5097         u8 cur_pos = 0, tuple_size, shift_bits;
5098         unsigned int i;
5099
5100         for (i = 0; i < MAX_META_DATA; i++) {
5101                 tuple_size = meta_data_key_info[i].key_length;
5102                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5103
5104                 switch (tuple_bit) {
5105                 case BIT(ROCE_TYPE):
5106                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5107                         cur_pos += tuple_size;
5108                         break;
5109                 case BIT(DST_VPORT):
5110                         port_number = hclge_get_port_number(HOST_PORT, 0,
5111                                                             rule->vf_id, 0);
5112                         hnae3_set_field(meta_data,
5113                                         GENMASK(cur_pos + tuple_size, cur_pos),
5114                                         cur_pos, port_number);
5115                         cur_pos += tuple_size;
5116                         break;
5117                 default:
5118                         break;
5119                 }
5120         }
5121
5122         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5123         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5124         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5125
5126         *key_x = cpu_to_le32(tmp_x << shift_bits);
5127         *key_y = cpu_to_le32(tmp_y << shift_bits);
5128 }
5129
5130 /* A complete key is a combination of the meta data key and the tuple key.
5131  * The meta data key is stored at the MSB region, the tuple key is stored at
5132  * the LSB region, and unused bits are filled with 0.
5133  */
5134 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5135                             struct hclge_fd_rule *rule)
5136 {
5137         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5138         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5139         u8 *cur_key_x, *cur_key_y;
5140         unsigned int i;
5141         int ret, tuple_size;
5142         u8 meta_data_region;
5143
5144         memset(key_x, 0, sizeof(key_x));
5145         memset(key_y, 0, sizeof(key_y));
5146         cur_key_x = key_x;
5147         cur_key_y = key_y;
5148
5149         for (i = 0; i < MAX_TUPLE; i++) {
5150                 bool tuple_valid;
5151                 u32 check_tuple;
5152
5153                 tuple_size = tuple_key_info[i].key_length / 8;
5154                 check_tuple = key_cfg->tuple_active & BIT(i);
5155
5156                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5157                                                      cur_key_y, rule);
5158                 if (tuple_valid) {
5159                         cur_key_x += tuple_size;
5160                         cur_key_y += tuple_size;
5161                 }
5162         }
5163
5164         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5165                         MAX_META_DATA_LENGTH / 8;
5166
5167         hclge_fd_convert_meta_data(key_cfg,
5168                                    (__le32 *)(key_x + meta_data_region),
5169                                    (__le32 *)(key_y + meta_data_region),
5170                                    rule);
5171
5172         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5173                                    true);
5174         if (ret) {
5175                 dev_err(&hdev->pdev->dev,
5176                         "fd key_y config fail, loc=%u, ret=%d\n",
5177                         rule->location, ret);
5178                 return ret;
5179         }
5180
5181         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5182                                    true);
5183         if (ret)
5184                 dev_err(&hdev->pdev->dev,
5185                         "fd key_x config fail, loc=%u, ret=%d\n",
5186                         rule->location, ret);
5187         return ret;
5188 }
5189
5190 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5191                                struct hclge_fd_rule *rule)
5192 {
5193         struct hclge_fd_ad_data ad_data;
5194
5195         ad_data.ad_id = rule->location;
5196
5197         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5198                 ad_data.drop_packet = true;
5199                 ad_data.forward_to_direct_queue = false;
5200                 ad_data.queue_id = 0;
5201         } else {
5202                 ad_data.drop_packet = false;
5203                 ad_data.forward_to_direct_queue = true;
5204                 ad_data.queue_id = rule->queue_id;
5205         }
5206
5207         ad_data.use_counter = false;
5208         ad_data.counter_id = 0;
5209
5210         ad_data.use_next_stage = false;
5211         ad_data.next_input_key = 0;
5212
5213         ad_data.write_rule_id_to_bd = true;
5214         ad_data.rule_id = rule->location;
5215
5216         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5217 }
5218
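/* Validate an ethtool flow spec against the supported flow types and tuples,
 * and record in *unused which tuple bits are not used by this rule so they
 * are skipped when building the TCAM key.
 */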
5219 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5220                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5221 {
5222         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5223         struct ethtool_usrip4_spec *usr_ip4_spec;
5224         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5225         struct ethtool_usrip6_spec *usr_ip6_spec;
5226         struct ethhdr *ether_spec;
5227
5228         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5229                 return -EINVAL;
5230
5231         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5232                 return -EOPNOTSUPP;
5233
5234         if ((fs->flow_type & FLOW_EXT) &&
5235             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5236                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5237                 return -EOPNOTSUPP;
5238         }
5239
5240         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5241         case SCTP_V4_FLOW:
5242         case TCP_V4_FLOW:
5243         case UDP_V4_FLOW:
5244                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5245                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5246
5247                 if (!tcp_ip4_spec->ip4src)
5248                         *unused |= BIT(INNER_SRC_IP);
5249
5250                 if (!tcp_ip4_spec->ip4dst)
5251                         *unused |= BIT(INNER_DST_IP);
5252
5253                 if (!tcp_ip4_spec->psrc)
5254                         *unused |= BIT(INNER_SRC_PORT);
5255
5256                 if (!tcp_ip4_spec->pdst)
5257                         *unused |= BIT(INNER_DST_PORT);
5258
5259                 if (!tcp_ip4_spec->tos)
5260                         *unused |= BIT(INNER_IP_TOS);
5261
5262                 break;
5263         case IP_USER_FLOW:
5264                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5265                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5266                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5267
5268                 if (!usr_ip4_spec->ip4src)
5269                         *unused |= BIT(INNER_SRC_IP);
5270
5271                 if (!usr_ip4_spec->ip4dst)
5272                         *unused |= BIT(INNER_DST_IP);
5273
5274                 if (!usr_ip4_spec->tos)
5275                         *unused |= BIT(INNER_IP_TOS);
5276
5277                 if (!usr_ip4_spec->proto)
5278                         *unused |= BIT(INNER_IP_PROTO);
5279
5280                 if (usr_ip4_spec->l4_4_bytes)
5281                         return -EOPNOTSUPP;
5282
5283                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5284                         return -EOPNOTSUPP;
5285
5286                 break;
5287         case SCTP_V6_FLOW:
5288         case TCP_V6_FLOW:
5289         case UDP_V6_FLOW:
5290                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5291                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5292                         BIT(INNER_IP_TOS);
5293
5294                 /* check whether src/dst ip address is used */
5295                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5296                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5297                         *unused |= BIT(INNER_SRC_IP);
5298
5299                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5300                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5301                         *unused |= BIT(INNER_DST_IP);
5302
5303                 if (!tcp_ip6_spec->psrc)
5304                         *unused |= BIT(INNER_SRC_PORT);
5305
5306                 if (!tcp_ip6_spec->pdst)
5307                         *unused |= BIT(INNER_DST_PORT);
5308
5309                 if (tcp_ip6_spec->tclass)
5310                         return -EOPNOTSUPP;
5311
5312                 break;
5313         case IPV6_USER_FLOW:
5314                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5315                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5316                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5317                         BIT(INNER_DST_PORT);
5318
5319                 /* check whether src/dst ip address is used */
5320                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5321                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5322                         *unused |= BIT(INNER_SRC_IP);
5323
5324                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5325                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5326                         *unused |= BIT(INNER_DST_IP);
5327
5328                 if (!usr_ip6_spec->l4_proto)
5329                         *unused |= BIT(INNER_IP_PROTO);
5330
5331                 if (usr_ip6_spec->tclass)
5332                         return -EOPNOTSUPP;
5333
5334                 if (usr_ip6_spec->l4_4_bytes)
5335                         return -EOPNOTSUPP;
5336
5337                 break;
5338         case ETHER_FLOW:
5339                 ether_spec = &fs->h_u.ether_spec;
5340                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5341                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5342                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5343
5344                 if (is_zero_ether_addr(ether_spec->h_source))
5345                         *unused |= BIT(INNER_SRC_MAC);
5346
5347                 if (is_zero_ether_addr(ether_spec->h_dest))
5348                         *unused |= BIT(INNER_DST_MAC);
5349
5350                 if (!ether_spec->h_proto)
5351                         *unused |= BIT(INNER_ETH_TYPE);
5352
5353                 break;
5354         default:
5355                 return -EOPNOTSUPP;
5356         }
5357
5358         if ((fs->flow_type & FLOW_EXT)) {
5359                 if (fs->h_ext.vlan_etype)
5360                         return -EOPNOTSUPP;
5361                 if (!fs->h_ext.vlan_tci)
5362                         *unused |= BIT(INNER_VLAN_TAG_FST);
5363
5364                 if (fs->m_ext.vlan_tci) {
5365                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5366                                 return -EINVAL;
5367                 }
5368         } else {
5369                 *unused |= BIT(INNER_VLAN_TAG_FST);
5370         }
5371
5372         if (fs->flow_type & FLOW_MAC_EXT) {
5373                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5374                         return -EOPNOTSUPP;
5375
5376                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5377                         *unused |= BIT(INNER_DST_MAC);
5378                 else
5379                         *unused &= ~(BIT(INNER_DST_MAC));
5380         }
5381
5382         return 0;
5383 }
5384
5385 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5386 {
5387         struct hclge_fd_rule *rule = NULL;
5388         struct hlist_node *node2;
5389
5390         spin_lock_bh(&hdev->fd_rule_lock);
5391         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5392                 if (rule->location >= location)
5393                         break;
5394         }
5395
5396         spin_unlock_bh(&hdev->fd_rule_lock);
5397
5398         return rule && rule->location == location;
5399 }
5400
5401 /* make sure this is called with fd_rule_lock held */
5402 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5403                                      struct hclge_fd_rule *new_rule,
5404                                      u16 location,
5405                                      bool is_add)
5406 {
5407         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5408         struct hlist_node *node2;
5409
5410         if (is_add && !new_rule)
5411                 return -EINVAL;
5412
5413         hlist_for_each_entry_safe(rule, node2,
5414                                   &hdev->fd_rule_list, rule_node) {
5415                 if (rule->location >= location)
5416                         break;
5417                 parent = rule;
5418         }
5419
5420         if (rule && rule->location == location) {
5421                 hlist_del(&rule->rule_node);
5422                 kfree(rule);
5423                 hdev->hclge_fd_rule_num--;
5424
5425                 if (!is_add) {
5426                         if (!hdev->hclge_fd_rule_num)
5427                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5428                         clear_bit(location, hdev->fd_bmap);
5429
5430                         return 0;
5431                 }
5432         } else if (!is_add) {
5433                 dev_err(&hdev->pdev->dev,
5434                         "delete fail, rule %u does not exist\n",
5435                         location);
5436                 return -EINVAL;
5437         }
5438
5439         INIT_HLIST_NODE(&new_rule->rule_node);
5440
5441         if (parent)
5442                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5443         else
5444                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5445
5446         set_bit(location, hdev->fd_bmap);
5447         hdev->hclge_fd_rule_num++;
5448         hdev->fd_active_type = new_rule->rule_type;
5449
5450         return 0;
5451 }
5452
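/* Copy the match value and mask fields of an ethtool flow spec into the
 * rule's tuples and tuples_mask, converting multi-byte fields to host byte
 * order.
 */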
5453 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5454                               struct ethtool_rx_flow_spec *fs,
5455                               struct hclge_fd_rule *rule)
5456 {
5457         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5458
5459         switch (flow_type) {
5460         case SCTP_V4_FLOW:
5461         case TCP_V4_FLOW:
5462         case UDP_V4_FLOW:
5463                 rule->tuples.src_ip[IPV4_INDEX] =
5464                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5465                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5466                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5467
5468                 rule->tuples.dst_ip[IPV4_INDEX] =
5469                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5470                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5471                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5472
5473                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5474                 rule->tuples_mask.src_port =
5475                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5476
5477                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5478                 rule->tuples_mask.dst_port =
5479                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5480
5481                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5482                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5483
5484                 rule->tuples.ether_proto = ETH_P_IP;
5485                 rule->tuples_mask.ether_proto = 0xFFFF;
5486
5487                 break;
5488         case IP_USER_FLOW:
5489                 rule->tuples.src_ip[IPV4_INDEX] =
5490                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5491                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5492                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5493
5494                 rule->tuples.dst_ip[IPV4_INDEX] =
5495                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5496                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5497                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5498
5499                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5500                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5501
5502                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5503                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5504
5505                 rule->tuples.ether_proto = ETH_P_IP;
5506                 rule->tuples_mask.ether_proto = 0xFFFF;
5507
5508                 break;
5509         case SCTP_V6_FLOW:
5510         case TCP_V6_FLOW:
5511         case UDP_V6_FLOW:
5512                 be32_to_cpu_array(rule->tuples.src_ip,
5513                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5514                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5515                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5516
5517                 be32_to_cpu_array(rule->tuples.dst_ip,
5518                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5519                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5520                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5521
5522                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5523                 rule->tuples_mask.src_port =
5524                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5525
5526                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5527                 rule->tuples_mask.dst_port =
5528                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5529
5530                 rule->tuples.ether_proto = ETH_P_IPV6;
5531                 rule->tuples_mask.ether_proto = 0xFFFF;
5532
5533                 break;
5534         case IPV6_USER_FLOW:
5535                 be32_to_cpu_array(rule->tuples.src_ip,
5536                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5537                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5538                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5539
5540                 be32_to_cpu_array(rule->tuples.dst_ip,
5541                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5542                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5543                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5544
5545                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5546                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5547
5548                 rule->tuples.ether_proto = ETH_P_IPV6;
5549                 rule->tuples_mask.ether_proto = 0xFFFF;
5550
5551                 break;
5552         case ETHER_FLOW:
5553                 ether_addr_copy(rule->tuples.src_mac,
5554                                 fs->h_u.ether_spec.h_source);
5555                 ether_addr_copy(rule->tuples_mask.src_mac,
5556                                 fs->m_u.ether_spec.h_source);
5557
5558                 ether_addr_copy(rule->tuples.dst_mac,
5559                                 fs->h_u.ether_spec.h_dest);
5560                 ether_addr_copy(rule->tuples_mask.dst_mac,
5561                                 fs->m_u.ether_spec.h_dest);
5562
5563                 rule->tuples.ether_proto =
5564                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5565                 rule->tuples_mask.ether_proto =
5566                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5567
5568                 break;
5569         default:
5570                 return -EOPNOTSUPP;
5571         }
5572
5573         switch (flow_type) {
5574         case SCTP_V4_FLOW:
5575         case SCTP_V6_FLOW:
5576                 rule->tuples.ip_proto = IPPROTO_SCTP;
5577                 rule->tuples_mask.ip_proto = 0xFF;
5578                 break;
5579         case TCP_V4_FLOW:
5580         case TCP_V6_FLOW:
5581                 rule->tuples.ip_proto = IPPROTO_TCP;
5582                 rule->tuples_mask.ip_proto = 0xFF;
5583                 break;
5584         case UDP_V4_FLOW:
5585         case UDP_V6_FLOW:
5586                 rule->tuples.ip_proto = IPPROTO_UDP;
5587                 rule->tuples_mask.ip_proto = 0xFF;
5588                 break;
5589         default:
5590                 break;
5591         }
5592
5593         if (fs->flow_type & FLOW_EXT) {
5594                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5595                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5596         }
5597
5598         if (fs->flow_type & FLOW_MAC_EXT) {
5599                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5600                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5601         }
5602
5603         return 0;
5604 }
5605
5606 /* the caller must hold hdev->fd_rule_lock when calling this function */
5607 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5608                                 struct hclge_fd_rule *rule)
5609 {
5610         int ret;
5611
5612         if (!rule) {
5613                 dev_err(&hdev->pdev->dev,
5614                         "The flow director rule is NULL\n");
5615                 return -EINVAL;
5616         }
5617
5618         /* updating the rule list never fails here, so no need to check the return value */
5619         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5620
5621         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5622         if (ret)
5623                 goto clear_rule;
5624
5625         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5626         if (ret)
5627                 goto clear_rule;
5628
5629         return 0;
5630
5631 clear_rule:
5632         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5633         return ret;
5634 }
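/* Usage sketch for hclge_fd_config_rule() above (illustrative only): callers
 * take hdev->fd_rule_lock around the call, as the ethtool and aRFS paths
 * below do, e.g.:
 *
 *	spin_lock_bh(&hdev->fd_rule_lock);
 *	ret = hclge_fd_config_rule(hdev, rule);
 *	spin_unlock_bh(&hdev->fd_rule_lock);
 *
 * On failure the rule is rolled back out of the software rule list (see the
 * clear_rule label above).
 */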
5635
5636 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5637                               struct ethtool_rxnfc *cmd)
5638 {
5639         struct hclge_vport *vport = hclge_get_vport(handle);
5640         struct hclge_dev *hdev = vport->back;
5641         u16 dst_vport_id = 0, q_index = 0;
5642         struct ethtool_rx_flow_spec *fs;
5643         struct hclge_fd_rule *rule;
5644         u32 unused = 0;
5645         u8 action;
5646         int ret;
5647
5648         if (!hnae3_dev_fd_supported(hdev))
5649                 return -EOPNOTSUPP;
5650
5651         if (!hdev->fd_en) {
5652                 dev_warn(&hdev->pdev->dev,
5653                          "Please enable flow director first\n");
5654                 return -EOPNOTSUPP;
5655         }
5656
5657         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5658
5659         ret = hclge_fd_check_spec(hdev, fs, &unused);
5660         if (ret) {
5661                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5662                 return ret;
5663         }
5664
5665         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5666                 action = HCLGE_FD_ACTION_DROP_PACKET;
5667         } else {
5668                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5669                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5670                 u16 tqps;
5671
5672                 if (vf > hdev->num_req_vfs) {
5673                         dev_err(&hdev->pdev->dev,
5674                                 "Error: vf id (%u) > max vf num (%u)\n",
5675                                 vf, hdev->num_req_vfs);
5676                         return -EINVAL;
5677                 }
5678
5679                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5680                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5681
5682                 if (ring >= tqps) {
5683                         dev_err(&hdev->pdev->dev,
5684                                 "Error: queue id (%u) > max queue id (%u)\n",
5685                                 ring, tqps - 1);
5686                         return -EINVAL;
5687                 }
5688
5689                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5690                 q_index = ring;
5691         }
5692
5693         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5694         if (!rule)
5695                 return -ENOMEM;
5696
5697         ret = hclge_fd_get_tuple(hdev, fs, rule);
5698         if (ret) {
5699                 kfree(rule);
5700                 return ret;
5701         }
5702
5703         rule->flow_type = fs->flow_type;
5704
5705         rule->location = fs->location;
5706         rule->unused_tuple = unused;
5707         rule->vf_id = dst_vport_id;
5708         rule->queue_id = q_index;
5709         rule->action = action;
5710         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5711
5712         /* to avoid rule conflicts, clear all existing aRFS rules when the
5713          * user configures a rule via ethtool
5714          */
5715         hclge_clear_arfs_rules(handle);
5716
5717         spin_lock_bh(&hdev->fd_rule_lock);
5718         ret = hclge_fd_config_rule(hdev, rule);
5719
5720         spin_unlock_bh(&hdev->fd_rule_lock);
5721
5722         return ret;
5723 }
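/* Note on hclge_add_fd_entry() above: the ethtool ring_cookie either holds
 * RX_CLS_FLOW_DISC (drop the packet) or encodes a destination ring plus an
 * optional VF id, decoded with ethtool_get_flow_spec_ring() and
 * ethtool_get_flow_spec_ring_vf(). A VF id of 0 targets the PF itself, so
 * the calling vport is used; otherwise hdev->vport[vf] is used and the ring
 * index is validated against that vport's allocated TQPs.
 */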
5724
5725 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5726                               struct ethtool_rxnfc *cmd)
5727 {
5728         struct hclge_vport *vport = hclge_get_vport(handle);
5729         struct hclge_dev *hdev = vport->back;
5730         struct ethtool_rx_flow_spec *fs;
5731         int ret;
5732
5733         if (!hnae3_dev_fd_supported(hdev))
5734                 return -EOPNOTSUPP;
5735
5736         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5737
5738         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5739                 return -EINVAL;
5740
5741         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5742                 dev_err(&hdev->pdev->dev,
5743                         "Delete fail, rule %u does not exist\n", fs->location);
5744                 return -ENOENT;
5745         }
5746
5747         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5748                                    NULL, false);
5749         if (ret)
5750                 return ret;
5751
5752         spin_lock_bh(&hdev->fd_rule_lock);
5753         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5754
5755         spin_unlock_bh(&hdev->fd_rule_lock);
5756
5757         return ret;
5758 }
5759
5760 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5761                                      bool clear_list)
5762 {
5763         struct hclge_vport *vport = hclge_get_vport(handle);
5764         struct hclge_dev *hdev = vport->back;
5765         struct hclge_fd_rule *rule;
5766         struct hlist_node *node;
5767         u16 location;
5768
5769         if (!hnae3_dev_fd_supported(hdev))
5770                 return;
5771
5772         spin_lock_bh(&hdev->fd_rule_lock);
5773         for_each_set_bit(location, hdev->fd_bmap,
5774                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5775                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5776                                      NULL, false);
5777
5778         if (clear_list) {
5779                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5780                                           rule_node) {
5781                         hlist_del(&rule->rule_node);
5782                         kfree(rule);
5783                 }
5784                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5785                 hdev->hclge_fd_rule_num = 0;
5786                 bitmap_zero(hdev->fd_bmap,
5787                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5788         }
5789
5790         spin_unlock_bh(&hdev->fd_rule_lock);
5791 }
5792
5793 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5794 {
5795         struct hclge_vport *vport = hclge_get_vport(handle);
5796         struct hclge_dev *hdev = vport->back;
5797         struct hclge_fd_rule *rule;
5798         struct hlist_node *node;
5799         int ret;
5800
5801         /* Return ok here, because reset error handling will check this
5802          * return value. If error is returned here, the reset process will
5803          * fail.
5804          */
5805         if (!hnae3_dev_fd_supported(hdev))
5806                 return 0;
5807
5808         /* if fd is disabled, the rules should not be restored during reset */
5809         if (!hdev->fd_en)
5810                 return 0;
5811
5812         spin_lock_bh(&hdev->fd_rule_lock);
5813         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5814                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5815                 if (!ret)
5816                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5817
5818                 if (ret) {
5819                         dev_warn(&hdev->pdev->dev,
5820                                  "Restore rule %u failed, remove it\n",
5821                                  rule->location);
5822                         clear_bit(rule->location, hdev->fd_bmap);
5823                         hlist_del(&rule->rule_node);
5824                         kfree(rule);
5825                         hdev->hclge_fd_rule_num--;
5826                 }
5827         }
5828
5829         if (hdev->hclge_fd_rule_num)
5830                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5831
5832         spin_unlock_bh(&hdev->fd_rule_lock);
5833
5834         return 0;
5835 }
5836
5837 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5838                                  struct ethtool_rxnfc *cmd)
5839 {
5840         struct hclge_vport *vport = hclge_get_vport(handle);
5841         struct hclge_dev *hdev = vport->back;
5842
5843         if (!hnae3_dev_fd_supported(hdev))
5844                 return -EOPNOTSUPP;
5845
5846         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5847         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5848
5849         return 0;
5850 }
5851
5852 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5853                                   struct ethtool_rxnfc *cmd)
5854 {
5855         struct hclge_vport *vport = hclge_get_vport(handle);
5856         struct hclge_fd_rule *rule = NULL;
5857         struct hclge_dev *hdev = vport->back;
5858         struct ethtool_rx_flow_spec *fs;
5859         struct hlist_node *node2;
5860
5861         if (!hnae3_dev_fd_supported(hdev))
5862                 return -EOPNOTSUPP;
5863
5864         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5865
5866         spin_lock_bh(&hdev->fd_rule_lock);
5867
5868         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5869                 if (rule->location >= fs->location)
5870                         break;
5871         }
5872
5873         if (!rule || fs->location != rule->location) {
5874                 spin_unlock_bh(&hdev->fd_rule_lock);
5875
5876                 return -ENOENT;
5877         }
5878
5879         fs->flow_type = rule->flow_type;
5880         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5881         case SCTP_V4_FLOW:
5882         case TCP_V4_FLOW:
5883         case UDP_V4_FLOW:
5884                 fs->h_u.tcp_ip4_spec.ip4src =
5885                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5886                 fs->m_u.tcp_ip4_spec.ip4src =
5887                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5888                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5889
5890                 fs->h_u.tcp_ip4_spec.ip4dst =
5891                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5892                 fs->m_u.tcp_ip4_spec.ip4dst =
5893                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5894                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5895
5896                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5897                 fs->m_u.tcp_ip4_spec.psrc =
5898                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5899                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5900
5901                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5902                 fs->m_u.tcp_ip4_spec.pdst =
5903                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5904                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5905
5906                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5907                 fs->m_u.tcp_ip4_spec.tos =
5908                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5909                                 0 : rule->tuples_mask.ip_tos;
5910
5911                 break;
5912         case IP_USER_FLOW:
5913                 fs->h_u.usr_ip4_spec.ip4src =
5914                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5915                 fs->m_u.usr_ip4_spec.ip4src =
5916                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5917                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5918
5919                 fs->h_u.usr_ip4_spec.ip4dst =
5920                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5921                 fs->m_u.usr_ip4_spec.ip4dst =
5922                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5923                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5924
5925                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5926                 fs->m_u.usr_ip4_spec.tos =
5927                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5928                                 0 : rule->tuples_mask.ip_tos;
5929
5930                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5931                 fs->m_u.usr_ip4_spec.proto =
5932                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5933                                 0 : rule->tuples_mask.ip_proto;
5934
5935                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5936
5937                 break;
5938         case SCTP_V6_FLOW:
5939         case TCP_V6_FLOW:
5940         case UDP_V6_FLOW:
5941                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5942                                   rule->tuples.src_ip, IPV6_SIZE);
5943                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5944                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5945                                sizeof(int) * IPV6_SIZE);
5946                 else
5947                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5948                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5949
5950                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5951                                   rule->tuples.dst_ip, IPV6_SIZE);
5952                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5953                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5954                                sizeof(int) * IPV6_SIZE);
5955                 else
5956                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5957                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5958
5959                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5960                 fs->m_u.tcp_ip6_spec.psrc =
5961                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5962                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5963
5964                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5965                 fs->m_u.tcp_ip6_spec.pdst =
5966                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5967                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5968
5969                 break;
5970         case IPV6_USER_FLOW:
5971                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5972                                   rule->tuples.src_ip, IPV6_SIZE);
5973                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5974                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5975                                sizeof(int) * IPV6_SIZE);
5976                 else
5977                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5978                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5979
5980                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5981                                   rule->tuples.dst_ip, IPV6_SIZE);
5982                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5983                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5984                                sizeof(int) * IPV6_SIZE);
5985                 else
5986                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5987                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5988
5989                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5990                 fs->m_u.usr_ip6_spec.l4_proto =
5991                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5992                                 0 : rule->tuples_mask.ip_proto;
5993
5994                 break;
5995         case ETHER_FLOW:
5996                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5997                                 rule->tuples.src_mac);
5998                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5999                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6000                 else
6001                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6002                                         rule->tuples_mask.src_mac);
6003
6004                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6005                                 rule->tuples.dst_mac);
6006                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6007                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6008                 else
6009                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6010                                         rule->tuples_mask.dst_mac);
6011
6012                 fs->h_u.ether_spec.h_proto =
6013                                 cpu_to_be16(rule->tuples.ether_proto);
6014                 fs->m_u.ether_spec.h_proto =
6015                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6016                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6017
6018                 break;
6019         default:
6020                 spin_unlock_bh(&hdev->fd_rule_lock);
6021                 return -EOPNOTSUPP;
6022         }
6023
6024         if (fs->flow_type & FLOW_EXT) {
6025                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6026                 fs->m_ext.vlan_tci =
6027                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6028                                 cpu_to_be16(VLAN_VID_MASK) :
6029                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6030         }
6031
6032         if (fs->flow_type & FLOW_MAC_EXT) {
6033                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6034                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6035                         eth_zero_addr(fs->m_ext.h_dest);
6036                 else
6037                         ether_addr_copy(fs->m_ext.h_dest,
6038                                         rule->tuples_mask.dst_mac);
6039         }
6040
6041         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6042                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6043         } else {
6044                 u64 vf_id;
6045
6046                 fs->ring_cookie = rule->queue_id;
6047                 vf_id = rule->vf_id;
6048                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6049                 fs->ring_cookie |= vf_id;
6050         }
6051
6052         spin_unlock_bh(&hdev->fd_rule_lock);
6053
6054         return 0;
6055 }
6056
6057 static int hclge_get_all_rules(struct hnae3_handle *handle,
6058                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6059 {
6060         struct hclge_vport *vport = hclge_get_vport(handle);
6061         struct hclge_dev *hdev = vport->back;
6062         struct hclge_fd_rule *rule;
6063         struct hlist_node *node2;
6064         int cnt = 0;
6065
6066         if (!hnae3_dev_fd_supported(hdev))
6067                 return -EOPNOTSUPP;
6068
6069         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6070
6071         spin_lock_bh(&hdev->fd_rule_lock);
6072         hlist_for_each_entry_safe(rule, node2,
6073                                   &hdev->fd_rule_list, rule_node) {
6074                 if (cnt == cmd->rule_cnt) {
6075                         spin_unlock_bh(&hdev->fd_rule_lock);
6076                         return -EMSGSIZE;
6077                 }
6078
6079                 rule_locs[cnt] = rule->location;
6080                 cnt++;
6081         }
6082
6083         spin_unlock_bh(&hdev->fd_rule_lock);
6084
6085         cmd->rule_cnt = cnt;
6086
6087         return 0;
6088 }
6089
6090 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6091                                      struct hclge_fd_rule_tuples *tuples)
6092 {
6093         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6094         tuples->ip_proto = fkeys->basic.ip_proto;
6095         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6096
6097         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6098                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6099                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6100         } else {
6101                 memcpy(tuples->src_ip,
6102                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6103                        sizeof(tuples->src_ip));
6104                 memcpy(tuples->dst_ip,
6105                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6106                        sizeof(tuples->dst_ip));
6107         }
6108 }
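/* Note on hclge_fd_get_flow_tuples() above: the tuple IP fields are arrays
 * sized for a full IPv6 address (IPV6_SIZE 32-bit words); for IPv4 only the
 * last word is used (index 3 here, which is assumed to be the same slot the
 * ethtool path addresses as src_ip[IPV4_INDEX] / dst_ip[IPV4_INDEX]).
 */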
6109
6110 /* traverse all rules, checking whether an existing rule has the same tuples */
6111 static struct hclge_fd_rule *
6112 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6113                           const struct hclge_fd_rule_tuples *tuples)
6114 {
6115         struct hclge_fd_rule *rule = NULL;
6116         struct hlist_node *node;
6117
6118         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6119                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6120                         return rule;
6121         }
6122
6123         return NULL;
6124 }
6125
6126 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6127                                      struct hclge_fd_rule *rule)
6128 {
6129         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6130                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6131                              BIT(INNER_SRC_PORT);
6132         rule->action = 0;
6133         rule->vf_id = 0;
6134         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6135         if (tuples->ether_proto == ETH_P_IP) {
6136                 if (tuples->ip_proto == IPPROTO_TCP)
6137                         rule->flow_type = TCP_V4_FLOW;
6138                 else
6139                         rule->flow_type = UDP_V4_FLOW;
6140         } else {
6141                 if (tuples->ip_proto == IPPROTO_TCP)
6142                         rule->flow_type = TCP_V6_FLOW;
6143                 else
6144                         rule->flow_type = UDP_V6_FLOW;
6145         }
6146         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6147         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6148 }
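/* Note on hclge_fd_build_arfs_rule() above: aRFS rules key only on the IP
 * addresses, L4 destination port, ether type and IP protocol, so the MAC,
 * VLAN, TOS and source-port tuples are flagged as unused; tuples_mask is
 * then filled with all ones so that every tuple which is used must match
 * exactly.
 */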
6149
6150 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6151                                       u16 flow_id, struct flow_keys *fkeys)
6152 {
6153         struct hclge_vport *vport = hclge_get_vport(handle);
6154         struct hclge_fd_rule_tuples new_tuples;
6155         struct hclge_dev *hdev = vport->back;
6156         struct hclge_fd_rule *rule;
6157         u16 tmp_queue_id;
6158         u16 bit_id;
6159         int ret;
6160
6161         if (!hnae3_dev_fd_supported(hdev))
6162                 return -EOPNOTSUPP;
6163
6164         memset(&new_tuples, 0, sizeof(new_tuples));
6165         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6166
6167         spin_lock_bh(&hdev->fd_rule_lock);
6168
6169         /* when an fd rule added by the user already exists,
6170          * arfs should not take effect
6171          */
6172         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6173                 spin_unlock_bh(&hdev->fd_rule_lock);
6174
6175                 return -EOPNOTSUPP;
6176         }
6177
6178         /* check whether a flow director filter already exists for this flow:
6179          * if not, create a new filter for it;
6180          * if a filter exists with a different queue id, modify the filter;
6181          * if a filter exists with the same queue id, do nothing
6182          */
6183         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6184         if (!rule) {
6185                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6186                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6187                         spin_unlock_bh(&hdev->fd_rule_lock);
6188
6189                         return -ENOSPC;
6190                 }
6191
6192                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6193                 if (!rule) {
6194                         spin_unlock_bh(&hdev->fd_rule_lock);
6195
6196                         return -ENOMEM;
6197                 }
6198
6199                 set_bit(bit_id, hdev->fd_bmap);
6200                 rule->location = bit_id;
6201                 rule->flow_id = flow_id;
6202                 rule->queue_id = queue_id;
6203                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6204                 ret = hclge_fd_config_rule(hdev, rule);
6205
6206                 spin_unlock_bh(&hdev->fd_rule_lock);
6207
6208                 if (ret)
6209                         return ret;
6210
6211                 return rule->location;
6212         }
6213
6214         spin_unlock_bh(&hdev->fd_rule_lock);
6215
6216         if (rule->queue_id == queue_id)
6217                 return rule->location;
6218
6219         tmp_queue_id = rule->queue_id;
6220         rule->queue_id = queue_id;
6221         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6222         if (ret) {
6223                 rule->queue_id = tmp_queue_id;
6224                 return ret;
6225         }
6226
6227         return rule->location;
6228 }
6229
6230 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6231 {
6232 #ifdef CONFIG_RFS_ACCEL
6233         struct hnae3_handle *handle = &hdev->vport[0].nic;
6234         struct hclge_fd_rule *rule;
6235         struct hlist_node *node;
6236         HLIST_HEAD(del_list);
6237
6238         spin_lock_bh(&hdev->fd_rule_lock);
6239         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6240                 spin_unlock_bh(&hdev->fd_rule_lock);
6241                 return;
6242         }
6243         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6244                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6245                                         rule->flow_id, rule->location)) {
6246                         hlist_del_init(&rule->rule_node);
6247                         hlist_add_head(&rule->rule_node, &del_list);
6248                         hdev->hclge_fd_rule_num--;
6249                         clear_bit(rule->location, hdev->fd_bmap);
6250                 }
6251         }
6252         spin_unlock_bh(&hdev->fd_rule_lock);
6253
6254         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6255                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6256                                      rule->location, NULL, false);
6257                 kfree(rule);
6258         }
6259 #endif
6260 }
6261
6262 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6263 {
6264 #ifdef CONFIG_RFS_ACCEL
6265         struct hclge_vport *vport = hclge_get_vport(handle);
6266         struct hclge_dev *hdev = vport->back;
6267
6268         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6269                 hclge_del_all_fd_entries(handle, true);
6270 #endif
6271 }
6272
6273 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6274 {
6275         struct hclge_vport *vport = hclge_get_vport(handle);
6276         struct hclge_dev *hdev = vport->back;
6277
6278         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6279                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6280 }
6281
6282 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6283 {
6284         struct hclge_vport *vport = hclge_get_vport(handle);
6285         struct hclge_dev *hdev = vport->back;
6286
6287         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6288 }
6289
6290 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6291 {
6292         struct hclge_vport *vport = hclge_get_vport(handle);
6293         struct hclge_dev *hdev = vport->back;
6294
6295         return hdev->rst_stats.hw_reset_done_cnt;
6296 }
6297
6298 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6299 {
6300         struct hclge_vport *vport = hclge_get_vport(handle);
6301         struct hclge_dev *hdev = vport->back;
6302         bool clear;
6303
6304         hdev->fd_en = enable;
6305         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6306         if (!enable)
6307                 hclge_del_all_fd_entries(handle, clear);
6308         else
6309                 hclge_restore_fd_entries(handle);
6310 }
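/* Note on hclge_enable_fd() above: when flow director is turned off, rules
 * added by aRFS are dropped from the software list as well (clear == true),
 * while ethtool rules are only removed from hardware and kept in the list so
 * that re-enabling FD can restore them via hclge_restore_fd_entries().
 */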
6311
6312 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6313 {
6314         struct hclge_desc desc;
6315         struct hclge_config_mac_mode_cmd *req =
6316                 (struct hclge_config_mac_mode_cmd *)desc.data;
6317         u32 loop_en = 0;
6318         int ret;
6319
6320         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6321
6322         if (enable) {
6323                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6324                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6325                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6326                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6327                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6328                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6329                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6330                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6331                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6332                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6333         }
6334
6335         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6336
6337         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6338         if (ret)
6339                 dev_err(&hdev->pdev->dev,
6340                         "mac enable fail, ret =%d.\n", ret);
6341 }
6342
6343 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6344                                      u8 switch_param, u8 param_mask)
6345 {
6346         struct hclge_mac_vlan_switch_cmd *req;
6347         struct hclge_desc desc;
6348         u32 func_id;
6349         int ret;
6350
6351         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6352         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6353         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6354                                    false);
6355         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6356         req->func_id = cpu_to_le32(func_id);
6357         req->switch_param = switch_param;
6358         req->param_mask = param_mask;
6359
6360         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6361         if (ret)
6362                 dev_err(&hdev->pdev->dev,
6363                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6364         return ret;
6365 }
6366
6367 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6368                                        int link_ret)
6369 {
6370 #define HCLGE_PHY_LINK_STATUS_NUM  200
6371
6372         struct phy_device *phydev = hdev->hw.mac.phydev;
6373         int i = 0;
6374         int ret;
6375
6376         do {
6377                 ret = phy_read_status(phydev);
6378                 if (ret) {
6379                         dev_err(&hdev->pdev->dev,
6380                                 "phy update link status fail, ret = %d\n", ret);
6381                         return;
6382                 }
6383
6384                 if (phydev->link == link_ret)
6385                         break;
6386
6387                 msleep(HCLGE_LINK_STATUS_MS);
6388         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6389 }
6390
6391 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6392 {
6393 #define HCLGE_MAC_LINK_STATUS_NUM  100
6394
6395         int i = 0;
6396         int ret;
6397
6398         do {
6399                 ret = hclge_get_mac_link_status(hdev);
6400                 if (ret < 0)
6401                         return ret;
6402                 else if (ret == link_ret)
6403                         return 0;
6404
6405                 msleep(HCLGE_LINK_STATUS_MS);
6406         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6407         return -EBUSY;
6408 }
6409
6410 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6411                                           bool is_phy)
6412 {
6413 #define HCLGE_LINK_STATUS_DOWN 0
6414 #define HCLGE_LINK_STATUS_UP   1
6415
6416         int link_ret;
6417
6418         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6419
6420         if (is_phy)
6421                 hclge_phy_link_status_wait(hdev, link_ret);
6422
6423         return hclge_mac_link_status_wait(hdev, link_ret);
6424 }
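/* Note on the link-status helpers above: both poll in HCLGE_LINK_STATUS_MS
 * (10 ms) steps, so the PHY wait gives up after roughly 200 * 10 ms = 2 s
 * and the MAC wait after roughly 100 * 10 ms = 1 s, the latter returning
 * -EBUSY if the MAC never reaches the requested state.
 */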
6425
6426 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6427 {
6428         struct hclge_config_mac_mode_cmd *req;
6429         struct hclge_desc desc;
6430         u32 loop_en;
6431         int ret;
6432
6433         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6434         /* 1 Read out the MAC mode config at first */
6435         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6436         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6437         if (ret) {
6438                 dev_err(&hdev->pdev->dev,
6439                         "mac loopback get fail, ret =%d.\n", ret);
6440                 return ret;
6441         }
6442
6443         /* 2 Then setup the loopback flag */
6444         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6445         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6446         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6447         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6448
6449         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6450
6451         /* 3 Config mac work mode with loopback flag
6452          * and its original configuration parameters
6453          */
6454         hclge_cmd_reuse_desc(&desc, false);
6455         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6456         if (ret)
6457                 dev_err(&hdev->pdev->dev,
6458                         "mac loopback set fail, ret =%d.\n", ret);
6459         return ret;
6460 }
6461
6462 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6463                                      enum hnae3_loop loop_mode)
6464 {
6465 #define HCLGE_SERDES_RETRY_MS   10
6466 #define HCLGE_SERDES_RETRY_NUM  100
6467
6468         struct hclge_serdes_lb_cmd *req;
6469         struct hclge_desc desc;
6470         int ret, i = 0;
6471         u8 loop_mode_b;
6472
6473         req = (struct hclge_serdes_lb_cmd *)desc.data;
6474         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6475
6476         switch (loop_mode) {
6477         case HNAE3_LOOP_SERIAL_SERDES:
6478                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6479                 break;
6480         case HNAE3_LOOP_PARALLEL_SERDES:
6481                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6482                 break;
6483         default:
6484                 dev_err(&hdev->pdev->dev,
6485                         "unsupported serdes loopback mode %d\n", loop_mode);
6486                 return -ENOTSUPP;
6487         }
6488
6489         if (en) {
6490                 req->enable = loop_mode_b;
6491                 req->mask = loop_mode_b;
6492         } else {
6493                 req->mask = loop_mode_b;
6494         }
6495
6496         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6497         if (ret) {
6498                 dev_err(&hdev->pdev->dev,
6499                         "serdes loopback set fail, ret = %d\n", ret);
6500                 return ret;
6501         }
6502
6503         do {
6504                 msleep(HCLGE_SERDES_RETRY_MS);
6505                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6506                                            true);
6507                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6508                 if (ret) {
6509                         dev_err(&hdev->pdev->dev,
6510                                 "serdes loopback get fail, ret = %d\n", ret);
6511                         return ret;
6512                 }
6513         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6514                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6515
6516         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6517                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6518                 return -EBUSY;
6519         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6520                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6521                 return -EIO;
6522         }
6523         return ret;
6524 }
6525
6526 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6527                                      enum hnae3_loop loop_mode)
6528 {
6529         int ret;
6530
6531         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6532         if (ret)
6533                 return ret;
6534
6535         hclge_cfg_mac_mode(hdev, en);
6536
6537         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6538         if (ret)
6539                 dev_err(&hdev->pdev->dev,
6540                         "serdes loopback config mac mode timeout\n");
6541
6542         return ret;
6543 }
6544
6545 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6546                                      struct phy_device *phydev)
6547 {
6548         int ret;
6549
6550         if (!phydev->suspended) {
6551                 ret = phy_suspend(phydev);
6552                 if (ret)
6553                         return ret;
6554         }
6555
6556         ret = phy_resume(phydev);
6557         if (ret)
6558                 return ret;
6559
6560         return phy_loopback(phydev, true);
6561 }
6562
6563 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6564                                       struct phy_device *phydev)
6565 {
6566         int ret;
6567
6568         ret = phy_loopback(phydev, false);
6569         if (ret)
6570                 return ret;
6571
6572         return phy_suspend(phydev);
6573 }
6574
6575 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6576 {
6577         struct phy_device *phydev = hdev->hw.mac.phydev;
6578         int ret;
6579
6580         if (!phydev)
6581                 return -ENOTSUPP;
6582
6583         if (en)
6584                 ret = hclge_enable_phy_loopback(hdev, phydev);
6585         else
6586                 ret = hclge_disable_phy_loopback(hdev, phydev);
6587         if (ret) {
6588                 dev_err(&hdev->pdev->dev,
6589                         "set phy loopback fail, ret = %d\n", ret);
6590                 return ret;
6591         }
6592
6593         hclge_cfg_mac_mode(hdev, en);
6594
6595         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6596         if (ret)
6597                 dev_err(&hdev->pdev->dev,
6598                         "phy loopback config mac mode timeout\n");
6599
6600         return ret;
6601 }
6602
6603 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6604                             int stream_id, bool enable)
6605 {
6606         struct hclge_desc desc;
6607         struct hclge_cfg_com_tqp_queue_cmd *req =
6608                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6609         int ret;
6610
6611         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6612         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6613         req->stream_id = cpu_to_le16(stream_id);
6614         if (enable)
6615                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6616
6617         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6618         if (ret)
6619                 dev_err(&hdev->pdev->dev,
6620                         "Tqp enable fail, status =%d.\n", ret);
6621         return ret;
6622 }
6623
6624 static int hclge_set_loopback(struct hnae3_handle *handle,
6625                               enum hnae3_loop loop_mode, bool en)
6626 {
6627         struct hclge_vport *vport = hclge_get_vport(handle);
6628         struct hnae3_knic_private_info *kinfo;
6629         struct hclge_dev *hdev = vport->back;
6630         int i, ret;
6631
6632         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6633          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6634          * the same, the packets are looped back in the SSU. If SSU loopback
6635          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6636          */
6637         if (hdev->pdev->revision >= 0x21) {
6638                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6639
6640                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6641                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6642                 if (ret)
6643                         return ret;
6644         }
6645
6646         switch (loop_mode) {
6647         case HNAE3_LOOP_APP:
6648                 ret = hclge_set_app_loopback(hdev, en);
6649                 break;
6650         case HNAE3_LOOP_SERIAL_SERDES:
6651         case HNAE3_LOOP_PARALLEL_SERDES:
6652                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6653                 break;
6654         case HNAE3_LOOP_PHY:
6655                 ret = hclge_set_phy_loopback(hdev, en);
6656                 break;
6657         default:
6658                 ret = -ENOTSUPP;
6659                 dev_err(&hdev->pdev->dev,
6660                         "loop_mode %d is not supported\n", loop_mode);
6661                 break;
6662         }
6663
6664         if (ret)
6665                 return ret;
6666
6667         kinfo = &vport->nic.kinfo;
6668         for (i = 0; i < kinfo->num_tqps; i++) {
6669                 ret = hclge_tqp_enable(hdev, i, 0, en);
6670                 if (ret)
6671                         return ret;
6672         }
6673
6674         return 0;
6675 }
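/* Note on hclge_set_loopback() above: on HW revision 0x21 and later the SSU
 * "allow loopback" switch parameter is adjusted first, so that self-addressed
 * test packets are not short-circuited in the SSU while loopback is enabled;
 * the mode-specific setup (app, serdes or phy) follows, and finally every TQP
 * of the vport is enabled or disabled to match.
 */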
6676
6677 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6678 {
6679         int ret;
6680
6681         ret = hclge_set_app_loopback(hdev, false);
6682         if (ret)
6683                 return ret;
6684
6685         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6686         if (ret)
6687                 return ret;
6688
6689         return hclge_cfg_serdes_loopback(hdev, false,
6690                                          HNAE3_LOOP_PARALLEL_SERDES);
6691 }
6692
6693 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6694 {
6695         struct hclge_vport *vport = hclge_get_vport(handle);
6696         struct hnae3_knic_private_info *kinfo;
6697         struct hnae3_queue *queue;
6698         struct hclge_tqp *tqp;
6699         int i;
6700
6701         kinfo = &vport->nic.kinfo;
6702         for (i = 0; i < kinfo->num_tqps; i++) {
6703                 queue = handle->kinfo.tqp[i];
6704                 tqp = container_of(queue, struct hclge_tqp, q);
6705                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6706         }
6707 }
6708
6709 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6710 {
6711         struct hclge_vport *vport = hclge_get_vport(handle);
6712         struct hclge_dev *hdev = vport->back;
6713
6714         if (enable) {
6715                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6716         } else {
6717                 /* Set the DOWN flag here to prevent the service task from
6718                  * being scheduled again
6719                  */
6720                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6721                 cancel_delayed_work_sync(&hdev->service_task);
6722                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6723         }
6724 }
6725
6726 static int hclge_ae_start(struct hnae3_handle *handle)
6727 {
6728         struct hclge_vport *vport = hclge_get_vport(handle);
6729         struct hclge_dev *hdev = vport->back;
6730
6731         /* mac enable */
6732         hclge_cfg_mac_mode(hdev, true);
6733         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6734         hdev->hw.mac.link = 0;
6735
6736         /* reset tqp stats */
6737         hclge_reset_tqp_stats(handle);
6738
6739         hclge_mac_start_phy(hdev);
6740
6741         return 0;
6742 }
6743
6744 static void hclge_ae_stop(struct hnae3_handle *handle)
6745 {
6746         struct hclge_vport *vport = hclge_get_vport(handle);
6747         struct hclge_dev *hdev = vport->back;
6748         int i;
6749
6750         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6751
6752         hclge_clear_arfs_rules(handle);
6753
6754         /* If it is not a PF reset, the firmware will disable the MAC,
6755          * so we only need to stop the PHY here.
6756          */
6757         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6758             hdev->reset_type != HNAE3_FUNC_RESET) {
6759                 hclge_mac_stop_phy(hdev);
6760                 hclge_update_link_status(hdev);
6761                 return;
6762         }
6763
6764         for (i = 0; i < handle->kinfo.num_tqps; i++)
6765                 hclge_reset_tqp(handle, i);
6766
6767         hclge_config_mac_tnl_int(hdev, false);
6768
6769         /* Mac disable */
6770         hclge_cfg_mac_mode(hdev, false);
6771
6772         hclge_mac_stop_phy(hdev);
6773
6774         /* reset tqp stats */
6775         hclge_reset_tqp_stats(handle);
6776         hclge_update_link_status(hdev);
6777 }
6778
6779 int hclge_vport_start(struct hclge_vport *vport)
6780 {
6781         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6782         vport->last_active_jiffies = jiffies;
6783         return 0;
6784 }
6785
6786 void hclge_vport_stop(struct hclge_vport *vport)
6787 {
6788         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6789 }
6790
6791 static int hclge_client_start(struct hnae3_handle *handle)
6792 {
6793         struct hclge_vport *vport = hclge_get_vport(handle);
6794
6795         return hclge_vport_start(vport);
6796 }
6797
6798 static void hclge_client_stop(struct hnae3_handle *handle)
6799 {
6800         struct hclge_vport *vport = hclge_get_vport(handle);
6801
6802         hclge_vport_stop(vport);
6803 }
6804
6805 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6806                                          u16 cmdq_resp, u8  resp_code,
6807                                          enum hclge_mac_vlan_tbl_opcode op)
6808 {
6809         struct hclge_dev *hdev = vport->back;
6810
6811         if (cmdq_resp) {
6812                 dev_err(&hdev->pdev->dev,
6813                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6814                         cmdq_resp);
6815                 return -EIO;
6816         }
6817
6818         if (op == HCLGE_MAC_VLAN_ADD) {
6819                 if ((!resp_code) || (resp_code == 1)) {
6820                         return 0;
6821                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6822                         dev_err(&hdev->pdev->dev,
6823                                 "add mac addr failed for uc_overflow.\n");
6824                         return -ENOSPC;
6825                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6826                         dev_err(&hdev->pdev->dev,
6827                                 "add mac addr failed for mc_overflow.\n");
6828                         return -ENOSPC;
6829                 }
6830
6831                 dev_err(&hdev->pdev->dev,
6832                         "add mac addr failed for undefined, code=%u.\n",
6833                         resp_code);
6834                 return -EIO;
6835         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6836                 if (!resp_code) {
6837                         return 0;
6838                 } else if (resp_code == 1) {
6839                         dev_dbg(&hdev->pdev->dev,
6840                                 "remove mac addr failed for miss.\n");
6841                         return -ENOENT;
6842                 }
6843
6844                 dev_err(&hdev->pdev->dev,
6845                         "remove mac addr failed for undefined, code=%u.\n",
6846                         resp_code);
6847                 return -EIO;
6848         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6849                 if (!resp_code) {
6850                         return 0;
6851                 } else if (resp_code == 1) {
6852                         dev_dbg(&hdev->pdev->dev,
6853                                 "lookup mac addr failed for miss.\n");
6854                         return -ENOENT;
6855                 }
6856
6857                 dev_err(&hdev->pdev->dev,
6858                         "lookup mac addr failed for undefined, code=%u.\n",
6859                         resp_code);
6860                 return -EIO;
6861         }
6862
6863         dev_err(&hdev->pdev->dev,
6864                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6865
6866         return -EINVAL;
6867 }
6868
6869 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6870 {
6871 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6872
6873         unsigned int word_num;
6874         unsigned int bit_num;
6875
6876         if (vfid > 255 || vfid < 0)
6877                 return -EIO;
6878
6879         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6880                 word_num = vfid / 32;
6881                 bit_num  = vfid % 32;
6882                 if (clr)
6883                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6884                 else
6885                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6886         } else {
6887                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6888                 bit_num  = vfid % 32;
6889                 if (clr)
6890                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6891                 else
6892                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6893         }
6894
6895         return 0;
6896 }
6897
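/* Worked example for hclge_update_desc_vfid() above: the per-function bitmap
 * spans descriptors 1 and 2, with the first 192 function ids in desc[1].
 * For example, vfid 200 falls in the second range, so word_num =
 * (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8, i.e. bit 8 of
 * desc[2].data[0] is set or cleared.
 */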
6898 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6899 {
6900 #define HCLGE_DESC_NUMBER 3
6901 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6902         int i, j;
6903
6904         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6905                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6906                         if (desc[i].data[j])
6907                                 return false;
6908
6909         return true;
6910 }
6911
6912 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6913                                    const u8 *addr, bool is_mc)
6914 {
6915         const unsigned char *mac_addr = addr;
6916         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6917                        (mac_addr[0]) | (mac_addr[1] << 8);
6918         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6919
6920         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6921         if (is_mc) {
6922                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6923                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6924         }
6925
6926         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6927         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6928 }
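/* Worked example for hclge_prepare_mac_addr() above, using the illustrative
 * address 00:11:22:33:44:55: high_val = 0x22 << 16 | 0x33 << 24 | 0x00 |
 * 0x11 << 8 = 0x33221100 and low_val = 0x44 | 0x55 << 8 = 0x5544, so the
 * first four bytes of the address fill mac_addr_hi32 and the last two fill
 * mac_addr_lo16, both converted to little endian for the command descriptor.
 */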
6929
6930 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6931                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6932 {
6933         struct hclge_dev *hdev = vport->back;
6934         struct hclge_desc desc;
6935         u8 resp_code;
6936         u16 retval;
6937         int ret;
6938
6939         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6940
6941         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6942
6943         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6944         if (ret) {
6945                 dev_err(&hdev->pdev->dev,
6946                         "del mac addr failed for cmd_send, ret =%d.\n",
6947                         ret);
6948                 return ret;
6949         }
6950         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6951         retval = le16_to_cpu(desc.retval);
6952
6953         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6954                                              HCLGE_MAC_VLAN_REMOVE);
6955 }
6956
6957 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6958                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6959                                      struct hclge_desc *desc,
6960                                      bool is_mc)
6961 {
6962         struct hclge_dev *hdev = vport->back;
6963         u8 resp_code;
6964         u16 retval;
6965         int ret;
6966
6967         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6968         if (is_mc) {
6969                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6970                 memcpy(desc[0].data,
6971                        req,
6972                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6973                 hclge_cmd_setup_basic_desc(&desc[1],
6974                                            HCLGE_OPC_MAC_VLAN_ADD,
6975                                            true);
6976                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6977                 hclge_cmd_setup_basic_desc(&desc[2],
6978                                            HCLGE_OPC_MAC_VLAN_ADD,
6979                                            true);
6980                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6981         } else {
6982                 memcpy(desc[0].data,
6983                        req,
6984                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6985                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6986         }
6987         if (ret) {
6988                 dev_err(&hdev->pdev->dev,
6989                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6990                         ret);
6991                 return ret;
6992         }
6993         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6994         retval = le16_to_cpu(desc[0].retval);
6995
6996         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6997                                              HCLGE_MAC_VLAN_LKUP);
6998 }
6999
7000 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7001                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7002                                   struct hclge_desc *mc_desc)
7003 {
7004         struct hclge_dev *hdev = vport->back;
7005         int cfg_status;
7006         u8 resp_code;
7007         u16 retval;
7008         int ret;
7009
7010         if (!mc_desc) {
7011                 struct hclge_desc desc;
7012
7013                 hclge_cmd_setup_basic_desc(&desc,
7014                                            HCLGE_OPC_MAC_VLAN_ADD,
7015                                            false);
7016                 memcpy(desc.data, req,
7017                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7018                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7019                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7020                 retval = le16_to_cpu(desc.retval);
7021
7022                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7023                                                            resp_code,
7024                                                            HCLGE_MAC_VLAN_ADD);
7025         } else {
7026                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7027                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7028                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7029                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7030                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7031                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7032                 memcpy(mc_desc[0].data, req,
7033                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7034                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7035                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7036                 retval = le16_to_cpu(mc_desc[0].retval);
7037
7038                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7039                                                            resp_code,
7040                                                            HCLGE_MAC_VLAN_ADD);
7041         }
7042
7043         if (ret) {
7044                 dev_err(&hdev->pdev->dev,
7045                         "add mac addr failed for cmd_send, ret =%d.\n",
7046                         ret);
7047                 return ret;
7048         }
7049
7050         return cfg_status;
7051 }
7052
7053 static int hclge_init_umv_space(struct hclge_dev *hdev)
7054 {
7055         u16 allocated_size = 0;
7056         int ret;
7057
7058         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7059                                   true);
7060         if (ret)
7061                 return ret;
7062
7063         if (allocated_size < hdev->wanted_umv_size)
7064                 dev_warn(&hdev->pdev->dev,
7065                          "Alloc umv space failed, want %u, get %u\n",
7066                          hdev->wanted_umv_size, allocated_size);
7067
7068         mutex_init(&hdev->umv_mutex);
7069         hdev->max_umv_size = allocated_size;
7070         /* Divide max_umv_size by (hdev->num_req_vfs + 2) in order to
7071          * reserve some unicast mac vlan table entries shared by the pf
7072          * and its vfs.
7073          */
7074         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7075         hdev->share_umv_size = hdev->priv_umv_size +
7076                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7077
7078         return 0;
7079 }
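
/* Worked example with assumed (illustrative) numbers: if firmware grants
 * max_umv_size = 3071 entries and num_req_vfs = 8, the divisor is 10, so
 * priv_umv_size = 3071 / 10 = 307 entries per function and
 * share_umv_size = 307 + (3071 % 10) = 308 entries shared by the pf and
 * its vfs.
 */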
7080
7081 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7082 {
7083         int ret;
7084
7085         if (hdev->max_umv_size > 0) {
7086                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7087                                           false);
7088                 if (ret)
7089                         return ret;
7090                 hdev->max_umv_size = 0;
7091         }
7092         mutex_destroy(&hdev->umv_mutex);
7093
7094         return 0;
7095 }
7096
7097 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7098                                u16 *allocated_size, bool is_alloc)
7099 {
7100         struct hclge_umv_spc_alc_cmd *req;
7101         struct hclge_desc desc;
7102         int ret;
7103
7104         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7105         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7106         if (!is_alloc)
7107                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7108
7109         req->space_size = cpu_to_le32(space_size);
7110
7111         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7112         if (ret) {
7113                 dev_err(&hdev->pdev->dev,
7114                         "%s umv space failed for cmd_send, ret =%d\n",
7115                         is_alloc ? "allocate" : "free", ret);
7116                 return ret;
7117         }
7118
7119         if (is_alloc && allocated_size)
7120                 *allocated_size = le32_to_cpu(desc.data[1]);
7121
7122         return 0;
7123 }
7124
7125 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7126 {
7127         struct hclge_vport *vport;
7128         int i;
7129
7130         for (i = 0; i < hdev->num_alloc_vport; i++) {
7131                 vport = &hdev->vport[i];
7132                 vport->used_umv_num = 0;
7133         }
7134
7135         mutex_lock(&hdev->umv_mutex);
7136         hdev->share_umv_size = hdev->priv_umv_size +
7137                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7138         mutex_unlock(&hdev->umv_mutex);
7139 }
7140
7141 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7142 {
7143         struct hclge_dev *hdev = vport->back;
7144         bool is_full;
7145
7146         mutex_lock(&hdev->umv_mutex);
7147         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7148                    hdev->share_umv_size == 0);
7149         mutex_unlock(&hdev->umv_mutex);
7150
7151         return is_full;
7152 }
7153
7154 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7155 {
7156         struct hclge_dev *hdev = vport->back;
7157
7158         mutex_lock(&hdev->umv_mutex);
7159         if (is_free) {
7160                 if (vport->used_umv_num > hdev->priv_umv_size)
7161                         hdev->share_umv_size++;
7162
7163                 if (vport->used_umv_num > 0)
7164                         vport->used_umv_num--;
7165         } else {
7166                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7167                     hdev->share_umv_size > 0)
7168                         hdev->share_umv_size--;
7169                 vport->used_umv_num++;
7170         }
7171         mutex_unlock(&hdev->umv_mutex);
7172 }
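
/* Note (added summary): a vport consumes its private quota first; once
 * used_umv_num reaches priv_umv_size, further addresses are charged to
 * the shared pool (share_umv_size), and freeing gives the entry back in
 * the same order.
 */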
7173
7174 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7175                              const unsigned char *addr)
7176 {
7177         struct hclge_vport *vport = hclge_get_vport(handle);
7178
7179         return hclge_add_uc_addr_common(vport, addr);
7180 }
7181
7182 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7183                              const unsigned char *addr)
7184 {
7185         struct hclge_dev *hdev = vport->back;
7186         struct hclge_mac_vlan_tbl_entry_cmd req;
7187         struct hclge_desc desc;
7188         u16 egress_port = 0;
7189         int ret;
7190
7191         /* mac addr check */
7192         if (is_zero_ether_addr(addr) ||
7193             is_broadcast_ether_addr(addr) ||
7194             is_multicast_ether_addr(addr)) {
7195                 dev_err(&hdev->pdev->dev,
7196                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7197                          addr, is_zero_ether_addr(addr),
7198                          is_broadcast_ether_addr(addr),
7199                          is_multicast_ether_addr(addr));
7200                 return -EINVAL;
7201         }
7202
7203         memset(&req, 0, sizeof(req));
7204
7205         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7206                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7207
7208         req.egress_port = cpu_to_le16(egress_port);
7209
7210         hclge_prepare_mac_addr(&req, addr, false);
7211
7212         /* Look up the mac address in the mac_vlan table, and add
7213          * it if the entry does not exist. Duplicate unicast entries
7214          * are not allowed in the mac vlan table.
7215          */
7216         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7217         if (ret == -ENOENT) {
7218                 if (!hclge_is_umv_space_full(vport)) {
7219                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7220                         if (!ret)
7221                                 hclge_update_umv_space(vport, false);
7222                         return ret;
7223                 }
7224
7225                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7226                         hdev->priv_umv_size);
7227
7228                 return -ENOSPC;
7229         }
7230
7231         /* check if we just hit the duplicate */
7232         if (!ret) {
7233                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7234                          vport->vport_id, addr);
7235                 return 0;
7236         }
7237
7238         dev_err(&hdev->pdev->dev,
7239                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7240                 addr);
7241
7242         return ret;
7243 }
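
/* Note (added summary): hclge_lookup_mac_vlan_tbl() returning -ENOENT
 * above means the address is not in the table yet, so it is added if
 * either the private or the shared UMV space still has room; 0 means a
 * duplicate entry, which only triggers a warning; any other error is
 * reported and passed back to the caller.
 */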
7244
7245 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7246                             const unsigned char *addr)
7247 {
7248         struct hclge_vport *vport = hclge_get_vport(handle);
7249
7250         return hclge_rm_uc_addr_common(vport, addr);
7251 }
7252
7253 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7254                             const unsigned char *addr)
7255 {
7256         struct hclge_dev *hdev = vport->back;
7257         struct hclge_mac_vlan_tbl_entry_cmd req;
7258         int ret;
7259
7260         /* mac addr check */
7261         if (is_zero_ether_addr(addr) ||
7262             is_broadcast_ether_addr(addr) ||
7263             is_multicast_ether_addr(addr)) {
7264                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7265                         addr);
7266                 return -EINVAL;
7267         }
7268
7269         memset(&req, 0, sizeof(req));
7270         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7271         hclge_prepare_mac_addr(&req, addr, false);
7272         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7273         if (!ret)
7274                 hclge_update_umv_space(vport, true);
7275
7276         return ret;
7277 }
7278
7279 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7280                              const unsigned char *addr)
7281 {
7282         struct hclge_vport *vport = hclge_get_vport(handle);
7283
7284         return hclge_add_mc_addr_common(vport, addr);
7285 }
7286
7287 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7288                              const unsigned char *addr)
7289 {
7290         struct hclge_dev *hdev = vport->back;
7291         struct hclge_mac_vlan_tbl_entry_cmd req;
7292         struct hclge_desc desc[3];
7293         int status;
7294
7295         /* mac addr check */
7296         if (!is_multicast_ether_addr(addr)) {
7297                 dev_err(&hdev->pdev->dev,
7298                         "Add mc mac err! invalid mac:%pM.\n",
7299                          addr);
7300                 return -EINVAL;
7301         }
7302         memset(&req, 0, sizeof(req));
7303         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7304         hclge_prepare_mac_addr(&req, addr, true);
7305         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7306         if (status) {
7307                 /* This mac addr does not exist, add a new entry for it */
7308                 memset(desc[0].data, 0, sizeof(desc[0].data));
7309                 memset(desc[1].data, 0, sizeof(desc[1].data));
7310                 memset(desc[2].data, 0, sizeof(desc[2].data));
7311         }
7312         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7313         if (status)
7314                 return status;
7315         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7316
7317         if (status == -ENOSPC)
7318                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7319
7320         return status;
7321 }
7322
7323 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7324                             const unsigned char *addr)
7325 {
7326         struct hclge_vport *vport = hclge_get_vport(handle);
7327
7328         return hclge_rm_mc_addr_common(vport, addr);
7329 }
7330
7331 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7332                             const unsigned char *addr)
7333 {
7334         struct hclge_dev *hdev = vport->back;
7335         struct hclge_mac_vlan_tbl_entry_cmd req;
7336         int status;
7337         struct hclge_desc desc[3];
7338
7339         /* mac addr check */
7340         if (!is_multicast_ether_addr(addr)) {
7341                 dev_dbg(&hdev->pdev->dev,
7342                         "Remove mc mac err! invalid mac:%pM.\n",
7343                          addr);
7344                 return -EINVAL;
7345         }
7346
7347         memset(&req, 0, sizeof(req));
7348         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7349         hclge_prepare_mac_addr(&req, addr, true);
7350         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7351         if (!status) {
7352                 /* This mac addr exists, remove this handle's VFID from it */
7353                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7354                 if (status)
7355                         return status;
7356
7357                 if (hclge_is_all_function_id_zero(desc))
7358                         /* All the vfids are zero, so delete this entry */
7359                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7360                 else
7361                         /* Not all the vfids are zero, just update the vfid */
7362                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7363
7364         } else {
7365                 /* This mac address may be in the mta table, but it cannot be
7366                  * deleted here because an mta entry represents an address
7367                  * range rather than a specific address. The delete action for
7368                  * all entries will take effect in update_mta_status, called by
7369                  * hns3_nic_set_rx_mode.
7370                  */
7371                 status = 0;
7372         }
7373
7374         return status;
7375 }
7376
7377 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7378                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7379 {
7380         struct hclge_vport_mac_addr_cfg *mac_cfg;
7381         struct list_head *list;
7382
7383         if (!vport->vport_id)
7384                 return;
7385
7386         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7387         if (!mac_cfg)
7388                 return;
7389
7390         mac_cfg->hd_tbl_status = true;
7391         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7392
7393         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7394                &vport->uc_mac_list : &vport->mc_mac_list;
7395
7396         list_add_tail(&mac_cfg->node, list);
7397 }
7398
7399 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7400                               bool is_write_tbl,
7401                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7402 {
7403         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7404         struct list_head *list;
7405         bool uc_flag, mc_flag;
7406
7407         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7408                &vport->uc_mac_list : &vport->mc_mac_list;
7409
7410         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7411         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7412
7413         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7414                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7415                         if (uc_flag && mac_cfg->hd_tbl_status)
7416                                 hclge_rm_uc_addr_common(vport, mac_addr);
7417
7418                         if (mc_flag && mac_cfg->hd_tbl_status)
7419                                 hclge_rm_mc_addr_common(vport, mac_addr);
7420
7421                         list_del(&mac_cfg->node);
7422                         kfree(mac_cfg);
7423                         break;
7424                 }
7425         }
7426 }
7427
7428 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7429                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7430 {
7431         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7432         struct list_head *list;
7433
7434         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7435                &vport->uc_mac_list : &vport->mc_mac_list;
7436
7437         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7438                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7439                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7440
7441                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7442                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7443
7444                 mac_cfg->hd_tbl_status = false;
7445                 if (is_del_list) {
7446                         list_del(&mac_cfg->node);
7447                         kfree(mac_cfg);
7448                 }
7449         }
7450 }
7451
7452 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7453 {
7454         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7455         struct hclge_vport *vport;
7456         int i;
7457
7458         mutex_lock(&hdev->vport_cfg_mutex);
7459         for (i = 0; i < hdev->num_alloc_vport; i++) {
7460                 vport = &hdev->vport[i];
7461                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7462                         list_del(&mac->node);
7463                         kfree(mac);
7464                 }
7465
7466                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7467                         list_del(&mac->node);
7468                         kfree(mac);
7469                 }
7470         }
7471         mutex_unlock(&hdev->vport_cfg_mutex);
7472 }
7473
7474 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7475                                               u16 cmdq_resp, u8 resp_code)
7476 {
7477 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7478 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7479 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7480 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7481
7482         int return_status;
7483
7484         if (cmdq_resp) {
7485                 dev_err(&hdev->pdev->dev,
7486                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7487                         cmdq_resp);
7488                 return -EIO;
7489         }
7490
7491         switch (resp_code) {
7492         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7493         case HCLGE_ETHERTYPE_ALREADY_ADD:
7494                 return_status = 0;
7495                 break;
7496         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7497                 dev_err(&hdev->pdev->dev,
7498                         "add mac ethertype failed for manager table overflow.\n");
7499                 return_status = -EIO;
7500                 break;
7501         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7502                 dev_err(&hdev->pdev->dev,
7503                         "add mac ethertype failed for key conflict.\n");
7504                 return_status = -EIO;
7505                 break;
7506         default:
7507                 dev_err(&hdev->pdev->dev,
7508                         "add mac ethertype failed for undefined, code=%u.\n",
7509                         resp_code);
7510                 return_status = -EIO;
7511         }
7512
7513         return return_status;
7514 }
7515
7516 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7517                                      u8 *mac_addr)
7518 {
7519         struct hclge_mac_vlan_tbl_entry_cmd req;
7520         struct hclge_dev *hdev = vport->back;
7521         struct hclge_desc desc;
7522         u16 egress_port = 0;
7523         int i;
7524
7525         if (is_zero_ether_addr(mac_addr))
7526                 return false;
7527
7528         memset(&req, 0, sizeof(req));
7529         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7530                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7531         req.egress_port = cpu_to_le16(egress_port);
7532         hclge_prepare_mac_addr(&req, mac_addr, false);
7533
7534         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7535                 return true;
7536
7537         vf_idx += HCLGE_VF_VPORT_START_NUM;
7538         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7539                 if (i != vf_idx &&
7540                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7541                         return true;
7542
7543         return false;
7544 }
7545
7546 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7547                             u8 *mac_addr)
7548 {
7549         struct hclge_vport *vport = hclge_get_vport(handle);
7550         struct hclge_dev *hdev = vport->back;
7551
7552         vport = hclge_get_vf_vport(hdev, vf);
7553         if (!vport)
7554                 return -EINVAL;
7555
7556         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7557                 dev_info(&hdev->pdev->dev,
7558                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7559                          mac_addr);
7560                 return 0;
7561         }
7562
7563         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7564                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7565                         mac_addr);
7566                 return -EEXIST;
7567         }
7568
7569         ether_addr_copy(vport->vf_info.mac, mac_addr);
7570         dev_info(&hdev->pdev->dev,
7571                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7572                  vf, mac_addr);
7573
7574         return hclge_inform_reset_assert_to_vf(vport);
7575 }
7576
7577 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7578                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7579 {
7580         struct hclge_desc desc;
7581         u8 resp_code;
7582         u16 retval;
7583         int ret;
7584
7585         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7586         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7587
7588         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7589         if (ret) {
7590                 dev_err(&hdev->pdev->dev,
7591                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7592                         ret);
7593                 return ret;
7594         }
7595
7596         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7597         retval = le16_to_cpu(desc.retval);
7598
7599         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7600 }
7601
7602 static int init_mgr_tbl(struct hclge_dev *hdev)
7603 {
7604         int ret;
7605         int i;
7606
7607         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7608                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7609                 if (ret) {
7610                         dev_err(&hdev->pdev->dev,
7611                                 "add mac ethertype failed, ret =%d.\n",
7612                                 ret);
7613                         return ret;
7614                 }
7615         }
7616
7617         return 0;
7618 }
7619
7620 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7621 {
7622         struct hclge_vport *vport = hclge_get_vport(handle);
7623         struct hclge_dev *hdev = vport->back;
7624
7625         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7626 }
7627
7628 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7629                               bool is_first)
7630 {
7631         const unsigned char *new_addr = (const unsigned char *)p;
7632         struct hclge_vport *vport = hclge_get_vport(handle);
7633         struct hclge_dev *hdev = vport->back;
7634         int ret;
7635
7636         /* mac addr check */
7637         if (is_zero_ether_addr(new_addr) ||
7638             is_broadcast_ether_addr(new_addr) ||
7639             is_multicast_ether_addr(new_addr)) {
7640                 dev_err(&hdev->pdev->dev,
7641                         "Change uc mac err! invalid mac:%pM.\n",
7642                          new_addr);
7643                 return -EINVAL;
7644         }
7645
7646         if ((!is_first || is_kdump_kernel()) &&
7647             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7648                 dev_warn(&hdev->pdev->dev,
7649                          "remove old uc mac address fail.\n");
7650
7651         ret = hclge_add_uc_addr(handle, new_addr);
7652         if (ret) {
7653                 dev_err(&hdev->pdev->dev,
7654                         "add uc mac address fail, ret =%d.\n",
7655                         ret);
7656
7657                 if (!is_first &&
7658                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7659                         dev_err(&hdev->pdev->dev,
7660                                 "restore uc mac address fail.\n");
7661
7662                 return -EIO;
7663         }
7664
7665         ret = hclge_pause_addr_cfg(hdev, new_addr);
7666         if (ret) {
7667                 dev_err(&hdev->pdev->dev,
7668                         "configure mac pause address fail, ret =%d.\n",
7669                         ret);
7670                 return -EIO;
7671         }
7672
7673         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7674
7675         return 0;
7676 }
7677
7678 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7679                           int cmd)
7680 {
7681         struct hclge_vport *vport = hclge_get_vport(handle);
7682         struct hclge_dev *hdev = vport->back;
7683
7684         if (!hdev->hw.mac.phydev)
7685                 return -EOPNOTSUPP;
7686
7687         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7688 }
7689
7690 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7691                                       u8 fe_type, bool filter_en, u8 vf_id)
7692 {
7693         struct hclge_vlan_filter_ctrl_cmd *req;
7694         struct hclge_desc desc;
7695         int ret;
7696
7697         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7698
7699         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7700         req->vlan_type = vlan_type;
7701         req->vlan_fe = filter_en ? fe_type : 0;
7702         req->vf_id = vf_id;
7703
7704         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7705         if (ret)
7706                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7707                         ret);
7708
7709         return ret;
7710 }
7711
7712 #define HCLGE_FILTER_TYPE_VF            0
7713 #define HCLGE_FILTER_TYPE_PORT          1
7714 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7715 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7716 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7717 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7718 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7719 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7720                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7721 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7722                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7723
7724 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7725 {
7726         struct hclge_vport *vport = hclge_get_vport(handle);
7727         struct hclge_dev *hdev = vport->back;
7728
7729         if (hdev->pdev->revision >= 0x21) {
7730                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7731                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7732                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7733                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7734         } else {
7735                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7736                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7737                                            0);
7738         }
7739         if (enable)
7740                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7741         else
7742                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7743 }
7744
7745 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7746                                     bool is_kill, u16 vlan,
7747                                     __be16 proto)
7748 {
7749         struct hclge_vport *vport = &hdev->vport[vfid];
7750         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7751         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7752         struct hclge_desc desc[2];
7753         u8 vf_byte_val;
7754         u8 vf_byte_off;
7755         int ret;
7756
7757         /* If the vf vlan table is full, firmware will disable the vf vlan
7758          * filter, so it is neither possible nor necessary to add new vlan ids.
7759          * If spoof check is enabled and the table is full, new vlans must not
7760          * be added, because tx packets with these vlan ids would be dropped.
7761          */
7762         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7763                 if (vport->vf_info.spoofchk && vlan) {
7764                         dev_err(&hdev->pdev->dev,
7765                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7766                         return -EPERM;
7767                 }
7768                 return 0;
7769         }
7770
7771         hclge_cmd_setup_basic_desc(&desc[0],
7772                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7773         hclge_cmd_setup_basic_desc(&desc[1],
7774                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7775
7776         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7777
7778         vf_byte_off = vfid / 8;
7779         vf_byte_val = 1 << (vfid % 8);
7780
7781         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7782         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7783
7784         req0->vlan_id  = cpu_to_le16(vlan);
7785         req0->vlan_cfg = is_kill;
7786
7787         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7788                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7789         else
7790                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7791
7792         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7793         if (ret) {
7794                 dev_err(&hdev->pdev->dev,
7795                         "Send vf vlan command fail, ret =%d.\n",
7796                         ret);
7797                 return ret;
7798         }
7799
7800         if (!is_kill) {
7801 #define HCLGE_VF_VLAN_NO_ENTRY  2
7802                 if (!req0->resp_code || req0->resp_code == 1)
7803                         return 0;
7804
7805                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7806                         set_bit(vfid, hdev->vf_vlan_full);
7807                         dev_warn(&hdev->pdev->dev,
7808                                  "vf vlan table is full, vf vlan filter is disabled\n");
7809                         return 0;
7810                 }
7811
7812                 dev_err(&hdev->pdev->dev,
7813                         "Add vf vlan filter fail, ret =%u.\n",
7814                         req0->resp_code);
7815         } else {
7816 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7817                 if (!req0->resp_code)
7818                         return 0;
7819
7820                 /* The vf vlan filter is disabled when the vf vlan table is
7821                  * full, so new vlan ids are never added to it. Just return 0
7822                  * without a warning to avoid massive verbose print logs when
7823                  * unloading.
7824                  */
7825                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7826                         return 0;
7827
7828                 dev_err(&hdev->pdev->dev,
7829                         "Kill vf vlan filter fail, ret =%u.\n",
7830                         req0->resp_code);
7831         }
7832
7833         return -EIO;
7834 }
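
/* Illustrative example (assumed vfid): for vfid = 10, vf_byte_off is
 * 10 / 8 = 1 and vf_byte_val is 1 << (10 % 8) = 0x04, so bit 2 of byte 1
 * in the vf bitmap of the first descriptor selects function 10 for the
 * vlan filter update above.
 */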
7835
7836 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7837                                       u16 vlan_id, bool is_kill)
7838 {
7839         struct hclge_vlan_filter_pf_cfg_cmd *req;
7840         struct hclge_desc desc;
7841         u8 vlan_offset_byte_val;
7842         u8 vlan_offset_byte;
7843         u8 vlan_offset_160;
7844         int ret;
7845
7846         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7847
7848         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7849         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7850                            HCLGE_VLAN_BYTE_SIZE;
7851         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7852
7853         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7854         req->vlan_offset = vlan_offset_160;
7855         req->vlan_cfg = is_kill;
7856         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7857
7858         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7859         if (ret)
7860                 dev_err(&hdev->pdev->dev,
7861                         "port vlan command, send fail, ret =%d.\n", ret);
7862         return ret;
7863 }
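
/* Illustrative example, assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 and
 * HCLGE_VLAN_BYTE_SIZE is 8 (both defined elsewhere in the driver): for
 * vlan_id = 1000, vlan_offset_160 = 6, vlan_offset_byte = (1000 % 160) / 8
 * = 5 and vlan_offset_byte_val = 1 << (1000 % 8) = 0x01, i.e. bit 0 of
 * byte 5 within the 160-vlan window selected by vlan_offset.
 */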
7864
7865 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7866                                     u16 vport_id, u16 vlan_id,
7867                                     bool is_kill)
7868 {
7869         u16 vport_idx, vport_num = 0;
7870         int ret;
7871
7872         if (is_kill && !vlan_id)
7873                 return 0;
7874
7875         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7876                                        proto);
7877         if (ret) {
7878                 dev_err(&hdev->pdev->dev,
7879                         "Set %u vport vlan filter config fail, ret =%d.\n",
7880                         vport_id, ret);
7881                 return ret;
7882         }
7883
7884         /* vlan 0 may be added twice when 8021q module is enabled */
7885         if (!is_kill && !vlan_id &&
7886             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7887                 return 0;
7888
7889         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7890                 dev_err(&hdev->pdev->dev,
7891                         "Add port vlan failed, vport %u is already in vlan %u\n",
7892                         vport_id, vlan_id);
7893                 return -EINVAL;
7894         }
7895
7896         if (is_kill &&
7897             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7898                 dev_err(&hdev->pdev->dev,
7899                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7900                         vport_id, vlan_id);
7901                 return -EINVAL;
7902         }
7903
7904         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7905                 vport_num++;
7906
7907         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7908                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7909                                                  is_kill);
7910
7911         return ret;
7912 }
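
/* Note (added summary): the port wide filter configured by
 * hclge_set_port_vlan_filter() is only touched when the first vport joins
 * a vlan (vport_num == 1 after the add) or the last vport leaves it
 * (vport_num == 0 after the kill); all other adds and kills only update
 * the per function vf vlan filter.
 */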
7913
7914 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7915 {
7916         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7917         struct hclge_vport_vtag_tx_cfg_cmd *req;
7918         struct hclge_dev *hdev = vport->back;
7919         struct hclge_desc desc;
7920         u16 bmap_index;
7921         int status;
7922
7923         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7924
7925         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7926         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7927         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7928         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7929                       vcfg->accept_tag1 ? 1 : 0);
7930         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7931                       vcfg->accept_untag1 ? 1 : 0);
7932         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7933                       vcfg->accept_tag2 ? 1 : 0);
7934         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7935                       vcfg->accept_untag2 ? 1 : 0);
7936         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7937                       vcfg->insert_tag1_en ? 1 : 0);
7938         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7939                       vcfg->insert_tag2_en ? 1 : 0);
7940         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7941
7942         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7943         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7944                         HCLGE_VF_NUM_PER_BYTE;
7945         req->vf_bitmap[bmap_index] =
7946                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7947
7948         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7949         if (status)
7950                 dev_err(&hdev->pdev->dev,
7951                         "Send port txvlan cfg command fail, ret =%d\n",
7952                         status);
7953
7954         return status;
7955 }
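
/* Illustrative example, assuming HCLGE_VF_NUM_PER_CMD is 64 and
 * HCLGE_VF_NUM_PER_BYTE is 8 (both defined elsewhere): for vport_id = 20,
 * vf_offset = 0, bmap_index = (20 % 64) / 8 = 2 and the bit written is
 * 1 << (20 % 8) = 0x10, so byte 2, bit 4 of vf_bitmap selects vport 20.
 */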
7956
7957 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7958 {
7959         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7960         struct hclge_vport_vtag_rx_cfg_cmd *req;
7961         struct hclge_dev *hdev = vport->back;
7962         struct hclge_desc desc;
7963         u16 bmap_index;
7964         int status;
7965
7966         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7967
7968         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7969         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7970                       vcfg->strip_tag1_en ? 1 : 0);
7971         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7972                       vcfg->strip_tag2_en ? 1 : 0);
7973         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7974                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7975         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7976                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7977
7978         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7979         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7980                         HCLGE_VF_NUM_PER_BYTE;
7981         req->vf_bitmap[bmap_index] =
7982                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7983
7984         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7985         if (status)
7986                 dev_err(&hdev->pdev->dev,
7987                         "Send port rxvlan cfg command fail, ret =%d\n",
7988                         status);
7989
7990         return status;
7991 }
7992
7993 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7994                                   u16 port_base_vlan_state,
7995                                   u16 vlan_tag)
7996 {
7997         int ret;
7998
7999         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8000                 vport->txvlan_cfg.accept_tag1 = true;
8001                 vport->txvlan_cfg.insert_tag1_en = false;
8002                 vport->txvlan_cfg.default_tag1 = 0;
8003         } else {
8004                 vport->txvlan_cfg.accept_tag1 = false;
8005                 vport->txvlan_cfg.insert_tag1_en = true;
8006                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8007         }
8008
8009         vport->txvlan_cfg.accept_untag1 = true;
8010
8011         /* accept_tag2 and accept_untag2 are not supported on
8012          * pdev revision 0x20; newer revisions support them, but
8013          * these two fields cannot be configured by the user.
8014          */
8015         vport->txvlan_cfg.accept_tag2 = true;
8016         vport->txvlan_cfg.accept_untag2 = true;
8017         vport->txvlan_cfg.insert_tag2_en = false;
8018         vport->txvlan_cfg.default_tag2 = 0;
8019
8020         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8021                 vport->rxvlan_cfg.strip_tag1_en = false;
8022                 vport->rxvlan_cfg.strip_tag2_en =
8023                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8024         } else {
8025                 vport->rxvlan_cfg.strip_tag1_en =
8026                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8027                 vport->rxvlan_cfg.strip_tag2_en = true;
8028         }
8029         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8030         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8031
8032         ret = hclge_set_vlan_tx_offload_cfg(vport);
8033         if (ret)
8034                 return ret;
8035
8036         return hclge_set_vlan_rx_offload_cfg(vport);
8037 }
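
/* Summary of the settings above (added note): with port based VLAN
 * disabled, tx accepts the stack's tag1 unchanged, rx never strips tag1
 * and strips tag2 according to rx_vlan_offload_en; with port based VLAN
 * enabled, tx inserts vlan_tag as tag1, rx strips tag1 according to
 * rx_vlan_offload_en and always strips tag2.
 */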
8038
8039 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8040 {
8041         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8042         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8043         struct hclge_desc desc;
8044         int status;
8045
8046         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8047         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8048         rx_req->ot_fst_vlan_type =
8049                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8050         rx_req->ot_sec_vlan_type =
8051                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8052         rx_req->in_fst_vlan_type =
8053                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8054         rx_req->in_sec_vlan_type =
8055                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8056
8057         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8058         if (status) {
8059                 dev_err(&hdev->pdev->dev,
8060                         "Send rxvlan protocol type command fail, ret =%d\n",
8061                         status);
8062                 return status;
8063         }
8064
8065         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8066
8067         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8068         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8069         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8070
8071         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8072         if (status)
8073                 dev_err(&hdev->pdev->dev,
8074                         "Send txvlan protocol type command fail, ret =%d\n",
8075                         status);
8076
8077         return status;
8078 }
8079
8080 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8081 {
8082 #define HCLGE_DEF_VLAN_TYPE             0x8100
8083
8084         struct hnae3_handle *handle = &hdev->vport[0].nic;
8085         struct hclge_vport *vport;
8086         int ret;
8087         int i;
8088
8089         if (hdev->pdev->revision >= 0x21) {
8090                 /* for revision 0x21, vf vlan filter is per function */
8091                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8092                         vport = &hdev->vport[i];
8093                         ret = hclge_set_vlan_filter_ctrl(hdev,
8094                                                          HCLGE_FILTER_TYPE_VF,
8095                                                          HCLGE_FILTER_FE_EGRESS,
8096                                                          true,
8097                                                          vport->vport_id);
8098                         if (ret)
8099                                 return ret;
8100                 }
8101
8102                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8103                                                  HCLGE_FILTER_FE_INGRESS, true,
8104                                                  0);
8105                 if (ret)
8106                         return ret;
8107         } else {
8108                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8109                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8110                                                  true, 0);
8111                 if (ret)
8112                         return ret;
8113         }
8114
8115         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8116
8117         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8118         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8119         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8120         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8121         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8122         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8123
8124         ret = hclge_set_vlan_protocol_type(hdev);
8125         if (ret)
8126                 return ret;
8127
8128         for (i = 0; i < hdev->num_alloc_vport; i++) {
8129                 u16 vlan_tag;
8130
8131                 vport = &hdev->vport[i];
8132                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8133
8134                 ret = hclge_vlan_offload_cfg(vport,
8135                                              vport->port_base_vlan_cfg.state,
8136                                              vlan_tag);
8137                 if (ret)
8138                         return ret;
8139         }
8140
8141         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8142 }
8143
8144 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8145                                        bool written_to_tbl)
8146 {
8147         struct hclge_vport_vlan_cfg *vlan;
8148
8149         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8150         if (!vlan)
8151                 return;
8152
8153         vlan->hd_tbl_status = written_to_tbl;
8154         vlan->vlan_id = vlan_id;
8155
8156         list_add_tail(&vlan->node, &vport->vlan_list);
8157 }
8158
8159 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8160 {
8161         struct hclge_vport_vlan_cfg *vlan, *tmp;
8162         struct hclge_dev *hdev = vport->back;
8163         int ret;
8164
8165         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8166                 if (!vlan->hd_tbl_status) {
8167                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8168                                                        vport->vport_id,
8169                                                        vlan->vlan_id, false);
8170                         if (ret) {
8171                                 dev_err(&hdev->pdev->dev,
8172                                         "restore vport vlan list failed, ret=%d\n",
8173                                         ret);
8174                                 return ret;
8175                         }
8176                 }
8177                 vlan->hd_tbl_status = true;
8178         }
8179
8180         return 0;
8181 }
8182
8183 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8184                                       bool is_write_tbl)
8185 {
8186         struct hclge_vport_vlan_cfg *vlan, *tmp;
8187         struct hclge_dev *hdev = vport->back;
8188
8189         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8190                 if (vlan->vlan_id == vlan_id) {
8191                         if (is_write_tbl && vlan->hd_tbl_status)
8192                                 hclge_set_vlan_filter_hw(hdev,
8193                                                          htons(ETH_P_8021Q),
8194                                                          vport->vport_id,
8195                                                          vlan_id,
8196                                                          true);
8197
8198                         list_del(&vlan->node);
8199                         kfree(vlan);
8200                         break;
8201                 }
8202         }
8203 }
8204
8205 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8206 {
8207         struct hclge_vport_vlan_cfg *vlan, *tmp;
8208         struct hclge_dev *hdev = vport->back;
8209
8210         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8211                 if (vlan->hd_tbl_status)
8212                         hclge_set_vlan_filter_hw(hdev,
8213                                                  htons(ETH_P_8021Q),
8214                                                  vport->vport_id,
8215                                                  vlan->vlan_id,
8216                                                  true);
8217
8218                 vlan->hd_tbl_status = false;
8219                 if (is_del_list) {
8220                         list_del(&vlan->node);
8221                         kfree(vlan);
8222                 }
8223         }
8224 }
8225
8226 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8227 {
8228         struct hclge_vport_vlan_cfg *vlan, *tmp;
8229         struct hclge_vport *vport;
8230         int i;
8231
8232         mutex_lock(&hdev->vport_cfg_mutex);
8233         for (i = 0; i < hdev->num_alloc_vport; i++) {
8234                 vport = &hdev->vport[i];
8235                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8236                         list_del(&vlan->node);
8237                         kfree(vlan);
8238                 }
8239         }
8240         mutex_unlock(&hdev->vport_cfg_mutex);
8241 }
8242
8243 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8244 {
8245         struct hclge_vport *vport = hclge_get_vport(handle);
8246         struct hclge_vport_vlan_cfg *vlan, *tmp;
8247         struct hclge_dev *hdev = vport->back;
8248         u16 vlan_proto;
8249         u16 state, vlan_id;
8250         int i;
8251
8252         mutex_lock(&hdev->vport_cfg_mutex);
8253         for (i = 0; i < hdev->num_alloc_vport; i++) {
8254                 vport = &hdev->vport[i];
8255                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8256                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8257                 state = vport->port_base_vlan_cfg.state;
8258
8259                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8260                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8261                                                  vport->vport_id, vlan_id,
8262                                                  false);
8263                         continue;
8264                 }
8265
8266                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8267                         int ret;
8268
8269                         if (!vlan->hd_tbl_status)
8270                                 continue;
8271                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8272                                                        vport->vport_id,
8273                                                        vlan->vlan_id, false);
8274                         if (ret)
8275                                 break;
8276                 }
8277         }
8278
8279         mutex_unlock(&hdev->vport_cfg_mutex);
8280 }
8281
8282 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8283 {
8284         struct hclge_vport *vport = hclge_get_vport(handle);
8285
8286         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8287                 vport->rxvlan_cfg.strip_tag1_en = false;
8288                 vport->rxvlan_cfg.strip_tag2_en = enable;
8289         } else {
8290                 vport->rxvlan_cfg.strip_tag1_en = enable;
8291                 vport->rxvlan_cfg.strip_tag2_en = true;
8292         }
8293         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8294         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8295         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8296
8297         return hclge_set_vlan_rx_offload_cfg(vport);
8298 }
8299
8300 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8301                                             u16 port_base_vlan_state,
8302                                             struct hclge_vlan_info *new_info,
8303                                             struct hclge_vlan_info *old_info)
8304 {
8305         struct hclge_dev *hdev = vport->back;
8306         int ret;
8307
8308         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8309                 hclge_rm_vport_all_vlan_table(vport, false);
8310                 return hclge_set_vlan_filter_hw(hdev,
8311                                                  htons(new_info->vlan_proto),
8312                                                  vport->vport_id,
8313                                                  new_info->vlan_tag,
8314                                                  false);
8315         }
8316
8317         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8318                                        vport->vport_id, old_info->vlan_tag,
8319                                        true);
8320         if (ret)
8321                 return ret;
8322
8323         return hclge_add_vport_all_vlan_table(vport);
8324 }
8325
8326 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8327                                     struct hclge_vlan_info *vlan_info)
8328 {
8329         struct hnae3_handle *nic = &vport->nic;
8330         struct hclge_vlan_info *old_vlan_info;
8331         struct hclge_dev *hdev = vport->back;
8332         int ret;
8333
8334         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8335
8336         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8337         if (ret)
8338                 return ret;
8339
8340         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8341                 /* add new VLAN tag */
8342                 ret = hclge_set_vlan_filter_hw(hdev,
8343                                                htons(vlan_info->vlan_proto),
8344                                                vport->vport_id,
8345                                                vlan_info->vlan_tag,
8346                                                false);
8347                 if (ret)
8348                         return ret;
8349
8350                 /* remove old VLAN tag */
8351                 ret = hclge_set_vlan_filter_hw(hdev,
8352                                                htons(old_vlan_info->vlan_proto),
8353                                                vport->vport_id,
8354                                                old_vlan_info->vlan_tag,
8355                                                true);
8356                 if (ret)
8357                         return ret;
8358
8359                 goto update;
8360         }
8361
8362         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8363                                                old_vlan_info);
8364         if (ret)
8365                 return ret;
8366
8367         /* update state only when disabling/enabling port-based VLAN */
8368         vport->port_base_vlan_cfg.state = state;
8369         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8370                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8371         else
8372                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8373
8374 update:
8375         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8376         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8377         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8378
8379         return 0;
8380 }
8381
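/* Map the requested VLAN against the current port-based VLAN state to one of
 * NOCHANGE / ENABLE / DISABLE / MODIFY, so the caller knows which transition
 * (if any) must be applied.
 */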
8382 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8383                                           enum hnae3_port_base_vlan_state state,
8384                                           u16 vlan)
8385 {
8386         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8387                 if (!vlan)
8388                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8389                 else
8390                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8391         } else {
8392                 if (!vlan)
8393                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8394                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8395                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8396                 else
8397                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8398         }
8399 }
8400
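/* .set_vf_vlan_filter handler, normally reached from the hns3 enet layer as
 * ndo_set_vf_vlan (e.g. "ip link set <pf> vf <n> vlan <vid> qos <qos>").
 * Only 802.1Q is accepted, and revision 0x20 hardware does not support it.
 */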
8401 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8402                                     u16 vlan, u8 qos, __be16 proto)
8403 {
8404         struct hclge_vport *vport = hclge_get_vport(handle);
8405         struct hclge_dev *hdev = vport->back;
8406         struct hclge_vlan_info vlan_info;
8407         u16 state;
8408         int ret;
8409
8410         if (hdev->pdev->revision == 0x20)
8411                 return -EOPNOTSUPP;
8412
8413         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8414         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8415                 return -EINVAL;
8416         if (proto != htons(ETH_P_8021Q))
8417                 return -EPROTONOSUPPORT;
8418
8419         vport = &hdev->vport[vfid];
8420         state = hclge_get_port_base_vlan_state(vport,
8421                                                vport->port_base_vlan_cfg.state,
8422                                                vlan);
8423         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8424                 return 0;
8425
8426         vlan_info.vlan_tag = vlan;
8427         vlan_info.qos = qos;
8428         vlan_info.vlan_proto = ntohs(proto);
8429
8430         /* update port-based VLAN for the PF */
8431         if (!vfid) {
8432                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8433                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8434                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8435
8436                 return ret;
8437         }
8438
8439         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8440                 return hclge_update_port_base_vlan_cfg(vport, state,
8441                                                        &vlan_info);
8442         } else {
8443                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8444                                                         (u8)vfid, state,
8445                                                         vlan, qos,
8446                                                         ntohs(proto));
8447                 return ret;
8448         }
8449 }
8450
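/* Add or remove (is_kill) a VLAN id for the calling vport. While a reset is
 * in progress, deletions are only recorded in vlan_del_fail_bmap and retried
 * later by hclge_sync_vlan_filter().
 */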
8451 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8452                           u16 vlan_id, bool is_kill)
8453 {
8454         struct hclge_vport *vport = hclge_get_vport(handle);
8455         struct hclge_dev *hdev = vport->back;
8456         bool writen_to_tbl = false;
8457         int ret = 0;
8458
8459         /* When the device is resetting, the firmware is unable to handle
8460          * the mailbox. Just record the vlan id, and remove it after
8461          * the reset has finished.
8462          */
8463         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8464                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8465                 return -EBUSY;
8466         }
8467
8468         /* When port-based vlan is enabled, we use it as the vlan filter
8469          * entry. In this case, we don't update the vlan filter table when
8470          * the user adds a new vlan or removes an existing one, we just
8471          * update the vport vlan list. The vlan ids in that list will not be
8472          * written to the vlan filter table until port-based vlan is disabled.
8473          */
8474         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8475                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8476                                                vlan_id, is_kill);
8477                 writen_to_tbl = true;
8478         }
8479
8480         if (!ret) {
8481                 if (is_kill)
8482                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8483                 else
8484                         hclge_add_vport_vlan_table(vport, vlan_id,
8485                                                    writen_to_tbl);
8486         } else if (is_kill) {
8487                 /* when removing the hw vlan filter failed, record the vlan
8488                  * id, and try to remove it from hw later, to stay consistent
8489                  * with the stack
8490                  */
8491                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8492         }
8493         return ret;
8494 }
8495
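/* Retry VLAN deletions that previously failed (or were deferred during
 * reset), bounded by HCLGE_MAX_SYNC_COUNT entries per run.
 */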
8496 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8497 {
8498 #define HCLGE_MAX_SYNC_COUNT    60
8499
8500         int i, ret, sync_cnt = 0;
8501         u16 vlan_id;
8502
8503         /* walk all vports (the PF is vport 0) and retry failed deletions */
8504         for (i = 0; i < hdev->num_alloc_vport; i++) {
8505                 struct hclge_vport *vport = &hdev->vport[i];
8506
8507                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8508                                          VLAN_N_VID);
8509                 while (vlan_id != VLAN_N_VID) {
8510                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8511                                                        vport->vport_id, vlan_id,
8512                                                        true);
8513                         if (ret && ret != -EINVAL)
8514                                 return;
8515
8516                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8517                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8518
8519                         sync_cnt++;
8520                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8521                                 return;
8522
8523                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8524                                                  VLAN_N_VID);
8525                 }
8526         }
8527 }
8528
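/* Program the MAC maximum frame size through the
 * HCLGE_OPC_CONFIG_MAX_FRM_SIZE firmware command.
 */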
8529 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8530 {
8531         struct hclge_config_max_frm_size_cmd *req;
8532         struct hclge_desc desc;
8533
8534         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8535
8536         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8537         req->max_frm_size = cpu_to_le16(new_mps);
8538         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8539
8540         return hclge_cmd_send(&hdev->hw, &desc, 1);
8541 }
8542
8543 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8544 {
8545         struct hclge_vport *vport = hclge_get_vport(handle);
8546
8547         return hclge_set_vport_mtu(vport, new_mtu);
8548 }
8549
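/* Convert an MTU into a maximum frame size and apply it. The frame size adds
 * the Ethernet header, FCS and two VLAN tags, e.g. an MTU of 1500 gives
 * 1500 + 14 + 4 + 2 * 4 = 1526 bytes. A VF only records its own mps (which
 * must fit within the PF's), while the PF reprograms the MAC and reallocates
 * the packet buffer.
 */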
8550 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8551 {
8552         struct hclge_dev *hdev = vport->back;
8553         int i, max_frm_size, ret;
8554
8555         /* HW supports two layers of vlan tags */
8556         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8557         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8558             max_frm_size > HCLGE_MAC_MAX_FRAME)
8559                 return -EINVAL;
8560
8561         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8562         mutex_lock(&hdev->vport_lock);
8563         /* VF's mps must fit within hdev->mps */
8564         if (vport->vport_id && max_frm_size > hdev->mps) {
8565                 mutex_unlock(&hdev->vport_lock);
8566                 return -EINVAL;
8567         } else if (vport->vport_id) {
8568                 vport->mps = max_frm_size;
8569                 mutex_unlock(&hdev->vport_lock);
8570                 return 0;
8571         }
8572
8573         /* PF's mps must be no less than any VF's mps */
8574         for (i = 1; i < hdev->num_alloc_vport; i++)
8575                 if (max_frm_size < hdev->vport[i].mps) {
8576                         mutex_unlock(&hdev->vport_lock);
8577                         return -EINVAL;
8578                 }
8579
8580         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8581
8582         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8583         if (ret) {
8584                 dev_err(&hdev->pdev->dev,
8585                         "Change mtu fail, ret =%d\n", ret);
8586                 goto out;
8587         }
8588
8589         hdev->mps = max_frm_size;
8590         vport->mps = max_frm_size;
8591
8592         ret = hclge_buffer_alloc(hdev);
8593         if (ret)
8594                 dev_err(&hdev->pdev->dev,
8595                         "Allocate buffer fail, ret =%d\n", ret);
8596
8597 out:
8598         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8599         mutex_unlock(&hdev->vport_lock);
8600         return ret;
8601 }
8602
8603 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8604                                     bool enable)
8605 {
8606         struct hclge_reset_tqp_queue_cmd *req;
8607         struct hclge_desc desc;
8608         int ret;
8609
8610         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8611
8612         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8613         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8614         if (enable)
8615                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8616
8617         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8618         if (ret) {
8619                 dev_err(&hdev->pdev->dev,
8620                         "Send tqp reset cmd error, status =%d\n", ret);
8621                 return ret;
8622         }
8623
8624         return 0;
8625 }
8626
8627 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8628 {
8629         struct hclge_reset_tqp_queue_cmd *req;
8630         struct hclge_desc desc;
8631         int ret;
8632
8633         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8634
8635         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8636         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8637
8638         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8639         if (ret) {
8640                 dev_err(&hdev->pdev->dev,
8641                         "Get reset status error, status =%d\n", ret);
8642                 return ret;
8643         }
8644
8645         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8646 }
8647
8648 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8649 {
8650         struct hnae3_queue *queue;
8651         struct hclge_tqp *tqp;
8652
8653         queue = handle->kinfo.tqp[queue_id];
8654         tqp = container_of(queue, struct hclge_tqp, q);
8655
8656         return tqp->index;
8657 }
8658
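/* Reset a single TQP: disable the queue, assert the per-queue reset, poll the
 * ready status up to HCLGE_TQP_RESET_TRY_TIMES times and finally deassert the
 * reset again.
 */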
8659 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8660 {
8661         struct hclge_vport *vport = hclge_get_vport(handle);
8662         struct hclge_dev *hdev = vport->back;
8663         int reset_try_times = 0;
8664         int reset_status;
8665         u16 queue_gid;
8666         int ret;
8667
8668         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8669
8670         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8671         if (ret) {
8672                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8673                 return ret;
8674         }
8675
8676         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8677         if (ret) {
8678                 dev_err(&hdev->pdev->dev,
8679                         "Send reset tqp cmd fail, ret = %d\n", ret);
8680                 return ret;
8681         }
8682
8683         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8684                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8685                 if (reset_status)
8686                         break;
8687
8688                 /* Wait for tqp hw reset */
8689                 usleep_range(1000, 1200);
8690         }
8691
8692         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8693                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8694                 return -ETIME; /* ret is 0 here; report the timeout to the caller */
8695         }
8696
8697         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8698         if (ret)
8699                 dev_err(&hdev->pdev->dev,
8700                         "Deassert the soft reset fail, ret = %d\n", ret);
8701
8702         return ret;
8703 }
8704
8705 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8706 {
8707         struct hclge_dev *hdev = vport->back;
8708         int reset_try_times = 0;
8709         int reset_status;
8710         u16 queue_gid;
8711         int ret;
8712
8713         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8714
8715         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8716         if (ret) {
8717                 dev_warn(&hdev->pdev->dev,
8718                          "Send reset tqp cmd fail, ret = %d\n", ret);
8719                 return;
8720         }
8721
8722         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8723                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8724                 if (reset_status)
8725                         break;
8726
8727                 /* Wait for tqp hw reset */
8728                 usleep_range(1000, 1200);
8729         }
8730
8731         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8732                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8733                 return;
8734         }
8735
8736         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8737         if (ret)
8738                 dev_warn(&hdev->pdev->dev,
8739                          "Deassert the soft reset fail, ret = %d\n", ret);
8740 }
8741
8742 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8743 {
8744         struct hclge_vport *vport = hclge_get_vport(handle);
8745         struct hclge_dev *hdev = vport->back;
8746
8747         return hdev->fw_version;
8748 }
8749
8750 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8751 {
8752         struct phy_device *phydev = hdev->hw.mac.phydev;
8753
8754         if (!phydev)
8755                 return;
8756
8757         phy_set_asym_pause(phydev, rx_en, tx_en);
8758 }
8759
8760 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8761 {
8762         int ret;
8763
8764         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8765                 return 0;
8766
8767         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8768         if (ret)
8769                 dev_err(&hdev->pdev->dev,
8770                         "configure pauseparam error, ret = %d.\n", ret);
8771
8772         return ret;
8773 }
8774
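/* Resolve the pause configuration from the local and link-partner autoneg
 * advertisements (via mii_resolve_flowctrl_fdx) and apply it to the MAC.
 * Pause is forced off for half-duplex links.
 */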
8775 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8776 {
8777         struct phy_device *phydev = hdev->hw.mac.phydev;
8778         u16 remote_advertising = 0;
8779         u16 local_advertising;
8780         u32 rx_pause, tx_pause;
8781         u8 flowctl;
8782
8783         if (!phydev->link || !phydev->autoneg)
8784                 return 0;
8785
8786         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8787
8788         if (phydev->pause)
8789                 remote_advertising = LPA_PAUSE_CAP;
8790
8791         if (phydev->asym_pause)
8792                 remote_advertising |= LPA_PAUSE_ASYM;
8793
8794         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8795                                            remote_advertising);
8796         tx_pause = flowctl & FLOW_CTRL_TX;
8797         rx_pause = flowctl & FLOW_CTRL_RX;
8798
8799         if (phydev->duplex == HCLGE_MAC_HALF) {
8800                 tx_pause = 0;
8801                 rx_pause = 0;
8802         }
8803
8804         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8805 }
8806
8807 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8808                                  u32 *rx_en, u32 *tx_en)
8809 {
8810         struct hclge_vport *vport = hclge_get_vport(handle);
8811         struct hclge_dev *hdev = vport->back;
8812         struct phy_device *phydev = hdev->hw.mac.phydev;
8813
8814         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8815
8816         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8817                 *rx_en = 0;
8818                 *tx_en = 0;
8819                 return;
8820         }
8821
8822         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8823                 *rx_en = 1;
8824                 *tx_en = 0;
8825         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8826                 *tx_en = 1;
8827                 *rx_en = 0;
8828         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8829                 *rx_en = 1;
8830                 *tx_en = 1;
8831         } else {
8832                 *rx_en = 0;
8833                 *tx_en = 0;
8834         }
8835 }
8836
8837 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8838                                          u32 rx_en, u32 tx_en)
8839 {
8840         if (rx_en && tx_en)
8841                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8842         else if (rx_en && !tx_en)
8843                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8844         else if (!rx_en && tx_en)
8845                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8846         else
8847                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8848
8849         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8850 }
8851
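/* .set_pauseparam handler, normally reached through "ethtool -A <dev> ...".
 * Flow control settings cannot be changed while PFC is active, and changing
 * autoneg here is rejected in favour of "ethtool -s <dev> autoneg <on|off>".
 */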
8852 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8853                                 u32 rx_en, u32 tx_en)
8854 {
8855         struct hclge_vport *vport = hclge_get_vport(handle);
8856         struct hclge_dev *hdev = vport->back;
8857         struct phy_device *phydev = hdev->hw.mac.phydev;
8858         u32 fc_autoneg;
8859
8860         if (phydev) {
8861                 fc_autoneg = hclge_get_autoneg(handle);
8862                 if (auto_neg != fc_autoneg) {
8863                         dev_info(&hdev->pdev->dev,
8864                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8865                         return -EOPNOTSUPP;
8866                 }
8867         }
8868
8869         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8870                 dev_info(&hdev->pdev->dev,
8871                          "Priority flow control enabled. Cannot set link flow control.\n");
8872                 return -EOPNOTSUPP;
8873         }
8874
8875         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8876
8877         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8878
8879         if (!auto_neg)
8880                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8881
8882         if (phydev)
8883                 return phy_start_aneg(phydev);
8884
8885         return -EOPNOTSUPP;
8886 }
8887
8888 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8889                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8890 {
8891         struct hclge_vport *vport = hclge_get_vport(handle);
8892         struct hclge_dev *hdev = vport->back;
8893
8894         if (speed)
8895                 *speed = hdev->hw.mac.speed;
8896         if (duplex)
8897                 *duplex = hdev->hw.mac.duplex;
8898         if (auto_neg)
8899                 *auto_neg = hdev->hw.mac.autoneg;
8900 }
8901
8902 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8903                                  u8 *module_type)
8904 {
8905         struct hclge_vport *vport = hclge_get_vport(handle);
8906         struct hclge_dev *hdev = vport->back;
8907
8908         if (media_type)
8909                 *media_type = hdev->hw.mac.media_type;
8910
8911         if (module_type)
8912                 *module_type = hdev->hw.mac.module_type;
8913 }
8914
8915 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8916                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8917 {
8918         struct hclge_vport *vport = hclge_get_vport(handle);
8919         struct hclge_dev *hdev = vport->back;
8920         struct phy_device *phydev = hdev->hw.mac.phydev;
8921         int mdix_ctrl, mdix, is_resolved;
8922         unsigned int retval;
8923
8924         if (!phydev) {
8925                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8926                 *tp_mdix = ETH_TP_MDI_INVALID;
8927                 return;
8928         }
8929
8930         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8931
8932         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8933         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8934                                     HCLGE_PHY_MDIX_CTRL_S);
8935
8936         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8937         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8938         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8939
8940         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8941
8942         switch (mdix_ctrl) {
8943         case 0x0:
8944                 *tp_mdix_ctrl = ETH_TP_MDI;
8945                 break;
8946         case 0x1:
8947                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8948                 break;
8949         case 0x3:
8950                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8951                 break;
8952         default:
8953                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8954                 break;
8955         }
8956
8957         if (!is_resolved)
8958                 *tp_mdix = ETH_TP_MDI_INVALID;
8959         else if (mdix)
8960                 *tp_mdix = ETH_TP_MDI_X;
8961         else
8962                 *tp_mdix = ETH_TP_MDI;
8963 }
8964
8965 static void hclge_info_show(struct hclge_dev *hdev)
8966 {
8967         struct device *dev = &hdev->pdev->dev;
8968
8969         dev_info(dev, "PF info begin:\n");
8970
8971         dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
8972         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
8973         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
8974         dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
8975         dev_info(dev, "Number of vmdq vports: %u\n", hdev->num_vmdq_vport);
8976         dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
8977         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
8978         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
8979         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
8980         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
8981         dev_info(dev, "This is %s PF\n",
8982                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8983         dev_info(dev, "DCB %s\n",
8984                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8985         dev_info(dev, "MQPRIO %s\n",
8986                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8987
8988         dev_info(dev, "PF info end.\n");
8989 }
8990
8991 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8992                                           struct hclge_vport *vport)
8993 {
8994         struct hnae3_client *client = vport->nic.client;
8995         struct hclge_dev *hdev = ae_dev->priv;
8996         int rst_cnt = hdev->rst_stats.reset_cnt;
8997         int ret;
8998
8999         ret = client->ops->init_instance(&vport->nic);
9000         if (ret)
9001                 return ret;
9002
9003         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9004         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9005             rst_cnt != hdev->rst_stats.reset_cnt) {
9006                 ret = -EBUSY;
9007                 goto init_nic_err;
9008         }
9009
9010         /* Enable nic hw error interrupts */
9011         ret = hclge_config_nic_hw_error(hdev, true);
9012         if (ret) {
9013                 dev_err(&ae_dev->pdev->dev,
9014                         "fail(%d) to enable hw error interrupts\n", ret);
9015                 goto init_nic_err;
9016         }
9017
9018         hnae3_set_client_init_flag(client, ae_dev, 1);
9019
9020         if (netif_msg_drv(&hdev->vport->nic))
9021                 hclge_info_show(hdev);
9022
9023         return ret;
9024
9025 init_nic_err:
9026         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9027         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9028                 msleep(HCLGE_WAIT_RESET_DONE);
9029
9030         client->ops->uninit_instance(&vport->nic, 0);
9031
9032         return ret;
9033 }
9034
9035 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9036                                            struct hclge_vport *vport)
9037 {
9038         struct hnae3_client *client = vport->roce.client;
9039         struct hclge_dev *hdev = ae_dev->priv;
9040         int rst_cnt;
9041         int ret;
9042
9043         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9044             !hdev->nic_client)
9045                 return 0;
9046
9047         client = hdev->roce_client;
9048         ret = hclge_init_roce_base_info(vport);
9049         if (ret)
9050                 return ret;
9051
9052         rst_cnt = hdev->rst_stats.reset_cnt;
9053         ret = client->ops->init_instance(&vport->roce);
9054         if (ret)
9055                 return ret;
9056
9057         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9058         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9059             rst_cnt != hdev->rst_stats.reset_cnt) {
9060                 ret = -EBUSY;
9061                 goto init_roce_err;
9062         }
9063
9064         /* Enable roce ras interrupts */
9065         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9066         if (ret) {
9067                 dev_err(&ae_dev->pdev->dev,
9068                         "fail(%d) to enable roce ras interrupts\n", ret);
9069                 goto init_roce_err;
9070         }
9071
9072         hnae3_set_client_init_flag(client, ae_dev, 1);
9073
9074         return 0;
9075
9076 init_roce_err:
9077         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9078         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9079                 msleep(HCLGE_WAIT_RESET_DONE);
9080
9081         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9082
9083         return ret;
9084 }
9085
9086 static int hclge_init_client_instance(struct hnae3_client *client,
9087                                       struct hnae3_ae_dev *ae_dev)
9088 {
9089         struct hclge_dev *hdev = ae_dev->priv;
9090         struct hclge_vport *vport;
9091         int i, ret;
9092
9093         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9094                 vport = &hdev->vport[i];
9095
9096                 switch (client->type) {
9097                 case HNAE3_CLIENT_KNIC:
9098                         hdev->nic_client = client;
9099                         vport->nic.client = client;
9100                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9101                         if (ret)
9102                                 goto clear_nic;
9103
9104                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9105                         if (ret)
9106                                 goto clear_roce;
9107
9108                         break;
9109                 case HNAE3_CLIENT_ROCE:
9110                         if (hnae3_dev_roce_supported(hdev)) {
9111                                 hdev->roce_client = client;
9112                                 vport->roce.client = client;
9113                         }
9114
9115                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9116                         if (ret)
9117                                 goto clear_roce;
9118
9119                         break;
9120                 default:
9121                         return -EINVAL;
9122                 }
9123         }
9124
9125         return 0;
9126
9127 clear_nic:
9128         hdev->nic_client = NULL;
9129         vport->nic.client = NULL;
9130         return ret;
9131 clear_roce:
9132         hdev->roce_client = NULL;
9133         vport->roce.client = NULL;
9134         return ret;
9135 }
9136
9137 static void hclge_uninit_client_instance(struct hnae3_client *client,
9138                                          struct hnae3_ae_dev *ae_dev)
9139 {
9140         struct hclge_dev *hdev = ae_dev->priv;
9141         struct hclge_vport *vport;
9142         int i;
9143
9144         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9145                 vport = &hdev->vport[i];
9146                 if (hdev->roce_client) {
9147                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9148                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9149                                 msleep(HCLGE_WAIT_RESET_DONE);
9150
9151                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9152                                                                 0);
9153                         hdev->roce_client = NULL;
9154                         vport->roce.client = NULL;
9155                 }
9156                 if (client->type == HNAE3_CLIENT_ROCE)
9157                         return;
9158                 if (hdev->nic_client && client->ops->uninit_instance) {
9159                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9160                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9161                                 msleep(HCLGE_WAIT_RESET_DONE);
9162
9163                         client->ops->uninit_instance(&vport->nic, 0);
9164                         hdev->nic_client = NULL;
9165                         vport->nic.client = NULL;
9166                 }
9167         }
9168 }
9169
9170 static int hclge_pci_init(struct hclge_dev *hdev)
9171 {
9172         struct pci_dev *pdev = hdev->pdev;
9173         struct hclge_hw *hw;
9174         int ret;
9175
9176         ret = pci_enable_device(pdev);
9177         if (ret) {
9178                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9179                 return ret;
9180         }
9181
9182         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9183         if (ret) {
9184                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9185                 if (ret) {
9186                         dev_err(&pdev->dev,
9187                                 "can't set consistent PCI DMA\n");
9188                         goto err_disable_device;
9189                 }
9190                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9191         }
9192
9193         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9194         if (ret) {
9195                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9196                 goto err_disable_device;
9197         }
9198
9199         pci_set_master(pdev);
9200         hw = &hdev->hw;
9201         hw->io_base = pcim_iomap(pdev, 2, 0);
9202         if (!hw->io_base) {
9203                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9204                 ret = -ENOMEM;
9205                 goto err_clr_master;
9206         }
9207
9208         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9209
9210         return 0;
9211 err_clr_master:
9212         pci_clear_master(pdev);
9213         pci_release_regions(pdev);
9214 err_disable_device:
9215         pci_disable_device(pdev);
9216
9217         return ret;
9218 }
9219
9220 static void hclge_pci_uninit(struct hclge_dev *hdev)
9221 {
9222         struct pci_dev *pdev = hdev->pdev;
9223
9224         pcim_iounmap(pdev, hdev->hw.io_base);
9225         pci_free_irq_vectors(pdev);
9226         pci_clear_master(pdev);
9227         pci_release_mem_regions(pdev);
9228         pci_disable_device(pdev);
9229 }
9230
9231 static void hclge_state_init(struct hclge_dev *hdev)
9232 {
9233         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9234         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9235         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9236         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9237         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9238         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9239 }
9240
9241 static void hclge_state_uninit(struct hclge_dev *hdev)
9242 {
9243         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9244         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9245
9246         if (hdev->reset_timer.function)
9247                 del_timer_sync(&hdev->reset_timer);
9248         if (hdev->service_task.work.func)
9249                 cancel_delayed_work_sync(&hdev->service_task);
9250         if (hdev->rst_service_task.func)
9251                 cancel_work_sync(&hdev->rst_service_task);
9252         if (hdev->mbx_service_task.func)
9253                 cancel_work_sync(&hdev->mbx_service_task);
9254 }
9255
9256 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9257 {
9258 #define HCLGE_FLR_WAIT_MS       100
9259 #define HCLGE_FLR_WAIT_CNT      50
9260         struct hclge_dev *hdev = ae_dev->priv;
9261         int cnt = 0;
9262
9263         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9264         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9265         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9266         hclge_reset_event(hdev->pdev, NULL);
9267
9268         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9269                cnt++ < HCLGE_FLR_WAIT_CNT)
9270                 msleep(HCLGE_FLR_WAIT_MS);
9271
9272         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9273                 dev_err(&hdev->pdev->dev,
9274                         "flr wait down timeout: %d\n", cnt);
9275 }
9276
9277 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9278 {
9279         struct hclge_dev *hdev = ae_dev->priv;
9280
9281         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9282 }
9283
9284 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9285 {
9286         u16 i;
9287
9288         for (i = 0; i < hdev->num_alloc_vport; i++) {
9289                 struct hclge_vport *vport = &hdev->vport[i];
9290                 int ret;
9291
9292                  /* Send cmd to clear VF's FUNC_RST_ING */
9293                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9294                 if (ret)
9295                         dev_warn(&hdev->pdev->dev,
9296                                  "clear vf(%u) rst failed %d!\n",
9297                                  vport->vport_id, ret);
9298         }
9299 }
9300
9301 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9302 {
9303         struct pci_dev *pdev = ae_dev->pdev;
9304         struct hclge_dev *hdev;
9305         int ret;
9306
9307         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9308         if (!hdev) {
9309                 ret = -ENOMEM;
9310                 goto out;
9311         }
9312
9313         hdev->pdev = pdev;
9314         hdev->ae_dev = ae_dev;
9315         hdev->reset_type = HNAE3_NONE_RESET;
9316         hdev->reset_level = HNAE3_FUNC_RESET;
9317         ae_dev->priv = hdev;
9318
9319         /* HW supports two layers of vlan tags */
9320         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9321
9322         mutex_init(&hdev->vport_lock);
9323         mutex_init(&hdev->vport_cfg_mutex);
9324         spin_lock_init(&hdev->fd_rule_lock);
9325
9326         ret = hclge_pci_init(hdev);
9327         if (ret) {
9328                 dev_err(&pdev->dev, "PCI init failed\n");
9329                 goto out;
9330         }
9331
9332         /* Firmware command queue initialize */
9333         ret = hclge_cmd_queue_init(hdev);
9334         if (ret) {
9335                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9336                 goto err_pci_uninit;
9337         }
9338
9339         /* Firmware command initialize */
9340         ret = hclge_cmd_init(hdev);
9341         if (ret)
9342                 goto err_cmd_uninit;
9343
9344         ret = hclge_get_cap(hdev);
9345         if (ret) {
9346                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9347                         ret);
9348                 goto err_cmd_uninit;
9349         }
9350
9351         ret = hclge_configure(hdev);
9352         if (ret) {
9353                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9354                 goto err_cmd_uninit;
9355         }
9356
9357         ret = hclge_init_msi(hdev);
9358         if (ret) {
9359                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9360                 goto err_cmd_uninit;
9361         }
9362
9363         ret = hclge_misc_irq_init(hdev);
9364         if (ret) {
9365                 dev_err(&pdev->dev,
9366                         "Misc IRQ(vector0) init error, ret = %d.\n",
9367                         ret);
9368                 goto err_msi_uninit;
9369         }
9370
9371         ret = hclge_alloc_tqps(hdev);
9372         if (ret) {
9373                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9374                 goto err_msi_irq_uninit;
9375         }
9376
9377         ret = hclge_alloc_vport(hdev);
9378         if (ret) {
9379                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9380                 goto err_msi_irq_uninit;
9381         }
9382
9383         ret = hclge_map_tqp(hdev);
9384         if (ret) {
9385                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9386                 goto err_msi_irq_uninit;
9387         }
9388
9389         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9390                 ret = hclge_mac_mdio_config(hdev);
9391                 if (ret) {
9392                         dev_err(&hdev->pdev->dev,
9393                                 "mdio config fail ret=%d\n", ret);
9394                         goto err_msi_irq_uninit;
9395                 }
9396         }
9397
9398         ret = hclge_init_umv_space(hdev);
9399         if (ret) {
9400                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9401                 goto err_mdiobus_unreg;
9402         }
9403
9404         ret = hclge_mac_init(hdev);
9405         if (ret) {
9406                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9407                 goto err_mdiobus_unreg;
9408         }
9409
9410         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9411         if (ret) {
9412                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9413                 goto err_mdiobus_unreg;
9414         }
9415
9416         ret = hclge_config_gro(hdev, true);
9417         if (ret)
9418                 goto err_mdiobus_unreg;
9419
9420         ret = hclge_init_vlan_config(hdev);
9421         if (ret) {
9422                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9423                 goto err_mdiobus_unreg;
9424         }
9425
9426         ret = hclge_tm_schd_init(hdev);
9427         if (ret) {
9428                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9429                 goto err_mdiobus_unreg;
9430         }
9431
9432         hclge_rss_init_cfg(hdev);
9433         ret = hclge_rss_init_hw(hdev);
9434         if (ret) {
9435                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9436                 goto err_mdiobus_unreg;
9437         }
9438
9439         ret = init_mgr_tbl(hdev);
9440         if (ret) {
9441                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9442                 goto err_mdiobus_unreg;
9443         }
9444
9445         ret = hclge_init_fd_config(hdev);
9446         if (ret) {
9447                 dev_err(&pdev->dev,
9448                         "fd table init fail, ret=%d\n", ret);
9449                 goto err_mdiobus_unreg;
9450         }
9451
9452         INIT_KFIFO(hdev->mac_tnl_log);
9453
9454         hclge_dcb_ops_set(hdev);
9455
9456         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9457         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9458         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9459         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9460
9461         /* Set up affinity after the service timer setup because add_timer_on
9462          * is called from the affinity notify callback.
9463          */
9464         hclge_misc_affinity_setup(hdev);
9465
9466         hclge_clear_all_event_cause(hdev);
9467         hclge_clear_resetting_state(hdev);
9468
9469         /* Log and clear the hw errors that have already occurred */
9470         hclge_handle_all_hns_hw_errors(ae_dev);
9471
9472         /* request a delayed reset for error recovery because an immediate
9473          * global reset on a PF would affect pending initialization of other PFs
9474          */
9475         if (ae_dev->hw_err_reset_req) {
9476                 enum hnae3_reset_type reset_level;
9477
9478                 reset_level = hclge_get_reset_level(ae_dev,
9479                                                     &ae_dev->hw_err_reset_req);
9480                 hclge_set_def_reset_request(ae_dev, reset_level);
9481                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9482         }
9483
9484         /* Enable MISC vector(vector0) */
9485         hclge_enable_vector(&hdev->misc_vector, true);
9486
9487         hclge_state_init(hdev);
9488         hdev->last_reset_time = jiffies;
9489
9490         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9491                  HCLGE_DRIVER_NAME);
9492
9493         return 0;
9494
9495 err_mdiobus_unreg:
9496         if (hdev->hw.mac.phydev)
9497                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9498 err_msi_irq_uninit:
9499         hclge_misc_irq_uninit(hdev);
9500 err_msi_uninit:
9501         pci_free_irq_vectors(pdev);
9502 err_cmd_uninit:
9503         hclge_cmd_uninit(hdev);
9504 err_pci_uninit:
9505         pcim_iounmap(pdev, hdev->hw.io_base);
9506         pci_clear_master(pdev);
9507         pci_release_regions(pdev);
9508         pci_disable_device(pdev);
9509 out:
9510         return ret;
9511 }
9512
9513 static void hclge_stats_clear(struct hclge_dev *hdev)
9514 {
9515         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9516 }
9517
9518 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9519 {
9520         return hclge_config_switch_param(hdev, vf, enable,
9521                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9522 }
9523
9524 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9525 {
9526         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9527                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9528                                           enable, vf);
9529 }
9530
9531 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9532 {
9533         int ret;
9534
9535         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9536         if (ret) {
9537                 dev_err(&hdev->pdev->dev,
9538                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9539                         vf, enable ? "on" : "off", ret);
9540                 return ret;
9541         }
9542
9543         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9544         if (ret)
9545                 dev_err(&hdev->pdev->dev,
9546                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9547                         vf, enable ? "on" : "off", ret);
9548
9549         return ret;
9550 }
9551
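/* .set_vf_spoofchk handler, normally reached as ndo_set_vf_spoofchk
 * (e.g. "ip link set <pf> vf <n> spoofchk on|off"). Enables both MAC and
 * VLAN anti-spoof checks for the VF; not supported on revision 0x20.
 */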
9552 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9553                                  bool enable)
9554 {
9555         struct hclge_vport *vport = hclge_get_vport(handle);
9556         struct hclge_dev *hdev = vport->back;
9557         u32 new_spoofchk = enable ? 1 : 0;
9558         int ret;
9559
9560         if (hdev->pdev->revision == 0x20)
9561                 return -EOPNOTSUPP;
9562
9563         vport = hclge_get_vf_vport(hdev, vf);
9564         if (!vport)
9565                 return -EINVAL;
9566
9567         if (vport->vf_info.spoofchk == new_spoofchk)
9568                 return 0;
9569
9570         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9571                 dev_warn(&hdev->pdev->dev,
9572                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9573                          vf);
9574         else if (enable && hclge_is_umv_space_full(vport))
9575                 dev_warn(&hdev->pdev->dev,
9576                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9577                          vf);
9578
9579         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9580         if (ret)
9581                 return ret;
9582
9583         vport->vf_info.spoofchk = new_spoofchk;
9584         return 0;
9585 }
9586
9587 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9588 {
9589         struct hclge_vport *vport = hdev->vport;
9590         int ret;
9591         int i;
9592
9593         if (hdev->pdev->revision == 0x20)
9594                 return 0;
9595
9596         /* resume the vf spoof check state after reset */
9597         for (i = 0; i < hdev->num_alloc_vport; i++) {
9598                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9599                                                vport->vf_info.spoofchk);
9600                 if (ret)
9601                         return ret;
9602
9603                 vport++;
9604         }
9605
9606         return 0;
9607 }
9608
9609 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9610 {
9611         struct hclge_vport *vport = hclge_get_vport(handle);
9612         struct hclge_dev *hdev = vport->back;
9613         u32 new_trusted = enable ? 1 : 0;
9614         bool en_bc_pmc;
9615         int ret;
9616
9617         vport = hclge_get_vf_vport(hdev, vf);
9618         if (!vport)
9619                 return -EINVAL;
9620
9621         if (vport->vf_info.trusted == new_trusted)
9622                 return 0;
9623
9624         /* Disable promisc mode for VF if it is not trusted any more. */
9625         if (!enable && vport->vf_info.promisc_enable) {
9626                 en_bc_pmc = hdev->pdev->revision != 0x20;
9627                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9628                                                    en_bc_pmc);
9629                 if (ret)
9630                         return ret;
9631                 vport->vf_info.promisc_enable = 0;
9632                 hclge_inform_vf_promisc_info(vport);
9633         }
9634
9635         vport->vf_info.trusted = new_trusted;
9636
9637         return 0;
9638 }
9639
9640 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9641 {
9642         int ret;
9643         int vf;
9644
9645         /* reset vf rate to default value */
9646         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9647                 struct hclge_vport *vport = &hdev->vport[vf];
9648
9649                 vport->vf_info.max_tx_rate = 0;
9650                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9651                 if (ret)
9652                         dev_err(&hdev->pdev->dev,
9653                                 "vf%d failed to reset to default, ret=%d\n",
9654                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9655         }
9656 }
9657
9658 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9659                                      int min_tx_rate, int max_tx_rate)
9660 {
9661         if (min_tx_rate != 0 ||
9662             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9663                 dev_err(&hdev->pdev->dev,
9664                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9665                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9666                 return -EINVAL;
9667         }
9668
9669         return 0;
9670 }
9671
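/* .set_vf_rate handler, normally reached as ndo_set_vf_rate
 * (e.g. "ip link set <pf> vf <n> max_tx_rate <Mbps>"). Only the maximum TX
 * rate can be limited; min_tx_rate must be 0, see the parameter check above.
 */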
9672 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9673                              int min_tx_rate, int max_tx_rate, bool force)
9674 {
9675         struct hclge_vport *vport = hclge_get_vport(handle);
9676         struct hclge_dev *hdev = vport->back;
9677         int ret;
9678
9679         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9680         if (ret)
9681                 return ret;
9682
9683         vport = hclge_get_vf_vport(hdev, vf);
9684         if (!vport)
9685                 return -EINVAL;
9686
9687         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9688                 return 0;
9689
9690         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9691         if (ret)
9692                 return ret;
9693
9694         vport->vf_info.max_tx_rate = max_tx_rate;
9695
9696         return 0;
9697 }
9698
9699 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9700 {
9701         struct hnae3_handle *handle = &hdev->vport->nic;
9702         struct hclge_vport *vport;
9703         int ret;
9704         int vf;
9705
9706         /* resume the vf max_tx_rate after reset */
9707         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9708                 vport = hclge_get_vf_vport(hdev, vf);
9709                 if (!vport)
9710                         return -EINVAL;
9711
9712                 /* zero means max rate; after reset, the firmware has already
9713                  * set it to max rate, so just continue.
9714                  */
9715                 if (!vport->vf_info.max_tx_rate)
9716                         continue;
9717
9718                 ret = hclge_set_vf_rate(handle, vf, 0,
9719                                         vport->vf_info.max_tx_rate, true);
9720                 if (ret) {
9721                         dev_err(&hdev->pdev->dev,
9722                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9723                                 vf, vport->vf_info.max_tx_rate, ret);
9724                         return ret;
9725                 }
9726         }
9727
9728         return 0;
9729 }
9730
9731 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9732 {
9733         struct hclge_vport *vport = hdev->vport;
9734         int i;
9735
9736         for (i = 0; i < hdev->num_alloc_vport; i++) {
9737                 hclge_vport_stop(vport);
9738                 vport++;
9739         }
9740 }
9741
9742 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9743 {
9744         struct hclge_dev *hdev = ae_dev->priv;
9745         struct pci_dev *pdev = ae_dev->pdev;
9746         int ret;
9747
9748         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9749
9750         hclge_stats_clear(hdev);
9751         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9752         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9753
9754         ret = hclge_cmd_init(hdev);
9755         if (ret) {
9756                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9757                 return ret;
9758         }
9759
9760         ret = hclge_map_tqp(hdev);
9761         if (ret) {
9762                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9763                 return ret;
9764         }
9765
9766         hclge_reset_umv_space(hdev);
9767
9768         ret = hclge_mac_init(hdev);
9769         if (ret) {
9770                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9771                 return ret;
9772         }
9773
9774         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9775         if (ret) {
9776                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9777                 return ret;
9778         }
9779
9780         ret = hclge_config_gro(hdev, true);
9781         if (ret)
9782                 return ret;
9783
9784         ret = hclge_init_vlan_config(hdev);
9785         if (ret) {
9786                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9787                 return ret;
9788         }
9789
9790         ret = hclge_tm_init_hw(hdev, true);
9791         if (ret) {
9792                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9793                 return ret;
9794         }
9795
9796         ret = hclge_rss_init_hw(hdev);
9797         if (ret) {
9798                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9799                 return ret;
9800         }
9801
9802         ret = hclge_init_fd_config(hdev);
9803         if (ret) {
9804                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9805                 return ret;
9806         }
9807
9808         /* Log and clear the hw errors that have already occurred */
9809         hclge_handle_all_hns_hw_errors(ae_dev);
9810
9811         /* Re-enable the hw error interrupts because
9812          * the interrupts get disabled on global reset.
9813          */
9814         ret = hclge_config_nic_hw_error(hdev, true);
9815         if (ret) {
9816                 dev_err(&pdev->dev,
9817                         "fail(%d) to re-enable NIC hw error interrupts\n",
9818                         ret);
9819                 return ret;
9820         }
9821
9822         if (hdev->roce_client) {
9823                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9824                 if (ret) {
9825                         dev_err(&pdev->dev,
9826                                 "fail(%d) to re-enable roce ras interrupts\n",
9827                                 ret);
9828                         return ret;
9829                 }
9830         }
9831
9832         hclge_reset_vport_state(hdev);
9833         ret = hclge_reset_vport_spoofchk(hdev);
9834         if (ret)
9835                 return ret;
9836
9837         ret = hclge_resume_vf_rate(hdev);
9838         if (ret)
9839                 return ret;
9840
9841         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9842                  HCLGE_DRIVER_NAME);
9843
9844         return 0;
9845 }
9846
9847 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9848 {
9849         struct hclge_dev *hdev = ae_dev->priv;
9850         struct hclge_mac *mac = &hdev->hw.mac;
9851
9852         hclge_reset_vf_rate(hdev);
9853         hclge_misc_affinity_teardown(hdev);
9854         hclge_state_uninit(hdev);
9855
9856         if (mac->phydev)
9857                 mdiobus_unregister(mac->mdio_bus);
9858
9859         hclge_uninit_umv_space(hdev);
9860
9861         /* Disable MISC vector(vector0) */
9862         hclge_enable_vector(&hdev->misc_vector, false);
9863         synchronize_irq(hdev->misc_vector.vector_irq);
9864
9865         /* Disable all hw interrupts */
9866         hclge_config_mac_tnl_int(hdev, false);
9867         hclge_config_nic_hw_error(hdev, false);
9868         hclge_config_rocee_ras_interrupt(hdev, false);
9869
9870         hclge_cmd_uninit(hdev);
9871         hclge_misc_irq_uninit(hdev);
9872         hclge_pci_uninit(hdev);
9873         mutex_destroy(&hdev->vport_lock);
9874         hclge_uninit_vport_mac_table(hdev);
9875         hclge_uninit_vport_vlan_table(hdev);
9876         mutex_destroy(&hdev->vport_cfg_mutex);
9877         ae_dev->priv = NULL;
9878 }
9879
9880 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9881 {
9882         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9883         struct hclge_vport *vport = hclge_get_vport(handle);
9884         struct hclge_dev *hdev = vport->back;
9885
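        /* The channel count is limited both by the TQPs allocated to this
         * vport, shared evenly across its TCs, and by the device-wide RSS
         * size limit.
         */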
9886         return min_t(u32, hdev->rss_size_max,
9887                      vport->alloc_tqps / kinfo->num_tc);
9888 }
9889
9890 static void hclge_get_channels(struct hnae3_handle *handle,
9891                                struct ethtool_channels *ch)
9892 {
9893         ch->max_combined = hclge_get_max_channels(handle);
9894         ch->other_count = 1;
9895         ch->max_other = 1;
9896         ch->combined_count = handle->kinfo.rss_size;
9897 }
9898
9899 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9900                                         u16 *alloc_tqps, u16 *max_rss_size)
9901 {
9902         struct hclge_vport *vport = hclge_get_vport(handle);
9903         struct hclge_dev *hdev = vport->back;
9904
9905         *alloc_tqps = vport->alloc_tqps;
9906         *max_rss_size = hdev->rss_size_max;
9907 }
9908
9909 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9910                               bool rxfh_configured)
9911 {
9912         struct hclge_vport *vport = hclge_get_vport(handle);
9913         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9914         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9915         struct hclge_dev *hdev = vport->back;
9916         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9917         u16 cur_rss_size = kinfo->rss_size;
9918         u16 cur_tqps = kinfo->num_tqps;
9919         u16 tc_valid[HCLGE_MAX_TC_NUM];
9920         u16 roundup_size;
9921         u32 *rss_indir;
9922         unsigned int i;
9923         int ret;
9924
9925         kinfo->req_rss_size = new_tqps_num;
9926
9927         ret = hclge_tm_vport_map_update(hdev);
9928         if (ret) {
9929                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
9930                 return ret;
9931         }
9932
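        /* tc_size is programmed as a log2 value: round the new rss_size up
         * to the next power of two and take its base-2 logarithm.
         */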
9933         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9934         roundup_size = ilog2(roundup_size);
9935         /* Set the RSS TC mode according to the new RSS size */
9936         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9937                 tc_valid[i] = 0;
9938
9939                 if (!(hdev->hw_tc_map & BIT(i)))
9940                         continue;
9941
9942                 tc_valid[i] = 1;
9943                 tc_size[i] = roundup_size;
9944                 tc_offset[i] = kinfo->rss_size * i;
9945         }
9946         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9947         if (ret)
9948                 return ret;
9949
9950         /* RSS indirection table has been configured by user */
9951         if (rxfh_configured)
9952                 goto out;
9953
9954         /* Reinitialize the RSS indirection table according to the new RSS size */
9955         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9956         if (!rss_indir)
9957                 return -ENOMEM;
9958
9959         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9960                 rss_indir[i] = i % kinfo->rss_size;
9961
9962         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9963         if (ret)
9964                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9965                         ret);
9966
9967         kfree(rss_indir);
9968
9969 out:
9970         if (!ret)
9971                 dev_info(&hdev->pdev->dev,
9972                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
9973                          cur_rss_size, kinfo->rss_size,
9974                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
9975
9976         return ret;
9977 }
9978
9979 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9980                               u32 *regs_num_64_bit)
9981 {
9982         struct hclge_desc desc;
9983         u32 total_num;
9984         int ret;
9985
9986         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9987         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9988         if (ret) {
9989                 dev_err(&hdev->pdev->dev,
9990                         "Query register number cmd failed, ret = %d.\n", ret);
9991                 return ret;
9992         }
9993
9994         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9995         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9996
9997         total_num = *regs_num_32_bit + *regs_num_64_bit;
9998         if (!total_num)
9999                 return -EINVAL;
10000
10001         return 0;
10002 }
10003
10004 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10005                                  void *data)
10006 {
10007 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10008 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10009
10010         struct hclge_desc *desc;
10011         u32 *reg_val = data;
10012         __le32 *desc_data;
10013         int nodata_num;
10014         int cmd_num;
10015         int i, k, n;
10016         int ret;
10017
10018         if (regs_num == 0)
10019                 return 0;
10020
10021         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10022         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10023                                HCLGE_32_BIT_REG_RTN_DATANUM);
10024         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10025         if (!desc)
10026                 return -ENOMEM;
10027
10028         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10029         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10030         if (ret) {
10031                 dev_err(&hdev->pdev->dev,
10032                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10033                 kfree(desc);
10034                 return ret;
10035         }
10036
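        /* Only the first descriptor keeps its command header
         * (HCLGE_32_BIT_DESC_NODATA_LEN words); the following descriptors
         * are parsed as being packed entirely with 32-bit register values.
         */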
10037         for (i = 0; i < cmd_num; i++) {
10038                 if (i == 0) {
10039                         desc_data = (__le32 *)(&desc[i].data[0]);
10040                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10041                 } else {
10042                         desc_data = (__le32 *)(&desc[i]);
10043                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10044                 }
10045                 for (k = 0; k < n; k++) {
10046                         *reg_val++ = le32_to_cpu(*desc_data++);
10047
10048                         regs_num--;
10049                         if (!regs_num)
10050                                 break;
10051                 }
10052         }
10053
10054         kfree(desc);
10055         return 0;
10056 }
10057
10058 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10059                                  void *data)
10060 {
10061 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10062 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10063
10064         struct hclge_desc *desc;
10065         u64 *reg_val = data;
10066         __le64 *desc_data;
10067         int nodata_len;
10068         int cmd_num;
10069         int i, k, n;
10070         int ret;
10071
10072         if (regs_num == 0)
10073                 return 0;
10074
10075         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10076         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10077                                HCLGE_64_BIT_REG_RTN_DATANUM);
10078         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10079         if (!desc)
10080                 return -ENOMEM;
10081
10082         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10083         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10084         if (ret) {
10085                 dev_err(&hdev->pdev->dev,
10086                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10087                 kfree(desc);
10088                 return ret;
10089         }
10090
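        /* Same layout as the 32-bit query: the header only occupies space in
         * the first descriptor, continuation descriptors carry data only.
         */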
10091         for (i = 0; i < cmd_num; i++) {
10092                 if (i == 0) {
10093                         desc_data = (__le64 *)(&desc[i].data[0]);
10094                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10095                 } else {
10096                         desc_data = (__le64 *)(&desc[i]);
10097                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10098                 }
10099                 for (k = 0; k < n; k++) {
10100                         *reg_val++ = le64_to_cpu(*desc_data++);
10101
10102                         regs_num--;
10103                         if (!regs_num)
10104                                 break;
10105                 }
10106         }
10107
10108         kfree(desc);
10109         return 0;
10110 }
10111
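/* Layout of the register dump: values are emitted in lines of
 * REG_NUM_PER_LINE u32 words, and each group is padded with SEPARATOR_VALUE
 * markers so that it always occupies a whole number of lines.
 */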
10112 #define MAX_SEPARATE_NUM        4
10113 #define SEPARATOR_VALUE         0xFDFCFBFA
10114 #define REG_NUM_PER_LINE        4
10115 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10116 #define REG_SEPARATOR_LINE      1
10117 #define REG_NUM_REMAIN_MASK     3
10118 #define BD_LIST_MAX_NUM         30
10119
10120 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10121 {
10122         /* prepare 4 commands to query DFX BD number */
10123         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10124         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10125         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10126         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10127         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10128         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10129         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10130
10131         return hclge_cmd_send(&hdev->hw, desc, 4);
10132 }
10133
10134 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10135                                     int *bd_num_list,
10136                                     u32 type_num)
10137 {
10138 #define HCLGE_DFX_REG_BD_NUM    4
10139
10140         u32 entries_per_desc, desc_index, index, offset, i;
10141         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10142         int ret;
10143
10144         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10145         if (ret) {
10146                 dev_err(&hdev->pdev->dev,
10147                         "Get dfx bd num fail, status is %d.\n", ret);
10148                 return ret;
10149         }
10150
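        /* Each DFX register type reports its BD count at a fixed offset in
         * the concatenated descriptor data; convert that offset into a
         * (descriptor, entry) pair to read the value.
         */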
10151         entries_per_desc = ARRAY_SIZE(desc[0].data);
10152         for (i = 0; i < type_num; i++) {
10153                 offset = hclge_dfx_bd_offset_list[i];
10154                 index = offset % entries_per_desc;
10155                 desc_index = offset / entries_per_desc;
10156                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10157         }
10158
10159         return ret;
10160 }
10161
10162 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10163                                   struct hclge_desc *desc_src, int bd_num,
10164                                   enum hclge_opcode_type cmd)
10165 {
10166         struct hclge_desc *desc = desc_src;
10167         int i, ret;
10168
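        /* Chain all but the last descriptor with the NEXT flag so the
         * firmware handles them as one multi-BD query.
         */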
10169         hclge_cmd_setup_basic_desc(desc, cmd, true);
10170         for (i = 0; i < bd_num - 1; i++) {
10171                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10172                 desc++;
10173                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10174         }
10175
10176         desc = desc_src;
10177         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10178         if (ret)
10179                 dev_err(&hdev->pdev->dev,
10180                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10181                         cmd, ret);
10182
10183         return ret;
10184 }
10185
10186 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10187                                     void *data)
10188 {
10189         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10190         struct hclge_desc *desc = desc_src;
10191         u32 *reg = data;
10192
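        /* Copy every register word out of the descriptors, then pad the
         * group with separator markers up to the next full dump line.
         */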
10193         entries_per_desc = ARRAY_SIZE(desc->data);
10194         reg_num = entries_per_desc * bd_num;
10195         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10196         for (i = 0; i < reg_num; i++) {
10197                 index = i % entries_per_desc;
10198                 desc_index = i / entries_per_desc;
10199                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10200         }
10201         for (i = 0; i < separator_num; i++)
10202                 *reg++ = SEPARATOR_VALUE;
10203
10204         return reg_num + separator_num;
10205 }
10206
10207 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10208 {
10209         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10210         int data_len_per_desc, data_len, bd_num, i;
10211         int bd_num_list[BD_LIST_MAX_NUM];
10212         int ret;
10213
10214         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10215         if (ret) {
10216                 dev_err(&hdev->pdev->dev,
10217                         "Get dfx reg bd num fail, status is %d.\n", ret);
10218                 return ret;
10219         }
10220
10221         data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
10222         *len = 0;
10223         for (i = 0; i < dfx_reg_type_num; i++) {
10224                 bd_num = bd_num_list[i];
10225                 data_len = data_len_per_desc * bd_num;
10226                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10227         }
10228
10229         return ret;
10230 }
10231
10232 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10233 {
10234         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10235         int bd_num, bd_num_max, buf_len, i;
10236         int bd_num_list[BD_LIST_MAX_NUM];
10237         struct hclge_desc *desc_src;
10238         u32 *reg = data;
10239         int ret;
10240
10241         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10242         if (ret) {
10243                 dev_err(&hdev->pdev->dev,
10244                         "Get dfx reg bd num fail, status is %d.\n", ret);
10245                 return ret;
10246         }
10247
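        /* Allocate the descriptor buffer once, sized for the largest BD
         * count, and reuse it for every DFX register type.
         */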
10248         bd_num_max = bd_num_list[0];
10249         for (i = 1; i < dfx_reg_type_num; i++)
10250                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10251
10252         buf_len = sizeof(*desc_src) * bd_num_max;
10253         desc_src = kzalloc(buf_len, GFP_KERNEL);
10254         if (!desc_src) {
10255                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10256                 return -ENOMEM;
10257         }
10258
10259         for (i = 0; i < dfx_reg_type_num; i++) {
10260                 bd_num = bd_num_list[i];
10261                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10262                                              hclge_dfx_reg_opcode_list[i]);
10263                 if (ret) {
10264                         dev_err(&hdev->pdev->dev,
10265                                 "Get dfx reg fail, status is %d.\n", ret);
10266                         break;
10267                 }
10268
10269                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10270         }
10271
10272         kfree(desc_src);
10273         return ret;
10274 }
10275
10276 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10277                               struct hnae3_knic_private_info *kinfo)
10278 {
10279 #define HCLGE_RING_REG_OFFSET           0x200
10280 #define HCLGE_RING_INT_REG_OFFSET       0x4
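/* Consecutive per-TQP ring register blocks are HCLGE_RING_REG_OFFSET apart,
 * and consecutive per-vector interrupt registers are
 * HCLGE_RING_INT_REG_OFFSET apart.
 */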
10281
10282         int i, j, reg_num, separator_num;
10283         int data_num_sum;
10284         u32 *reg = data;
10285
10286         /* fetch per-PF register values from the PF PCIe register space */
10287         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10288         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10289         for (i = 0; i < reg_num; i++)
10290                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10291         for (i = 0; i < separator_num; i++)
10292                 *reg++ = SEPARATOR_VALUE;
10293         data_num_sum = reg_num + separator_num;
10294
10295         reg_num = ARRAY_SIZE(common_reg_addr_list);
10296         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10297         for (i = 0; i < reg_num; i++)
10298                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10299         for (i = 0; i < separator_num; i++)
10300                 *reg++ = SEPARATOR_VALUE;
10301         data_num_sum += reg_num + separator_num;
10302
10303         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10304         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10305         for (j = 0; j < kinfo->num_tqps; j++) {
10306                 for (i = 0; i < reg_num; i++)
10307                         *reg++ = hclge_read_dev(&hdev->hw,
10308                                                 ring_reg_addr_list[i] +
10309                                                 HCLGE_RING_REG_OFFSET * j);
10310                 for (i = 0; i < separator_num; i++)
10311                         *reg++ = SEPARATOR_VALUE;
10312         }
10313         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10314
10315         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10316         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10317         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10318                 for (i = 0; i < reg_num; i++)
10319                         *reg++ = hclge_read_dev(&hdev->hw,
10320                                                 tqp_intr_reg_addr_list[i] +
10321                                                 HCLGE_RING_INT_REG_OFFSET * j);
10322                 for (i = 0; i < separator_num; i++)
10323                         *reg++ = SEPARATOR_VALUE;
10324         }
10325         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10326
10327         return data_num_sum;
10328 }
10329
10330 static int hclge_get_regs_len(struct hnae3_handle *handle)
10331 {
10332         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10333         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10334         struct hclge_vport *vport = hclge_get_vport(handle);
10335         struct hclge_dev *hdev = vport->back;
10336         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10337         int regs_lines_32_bit, regs_lines_64_bit;
10338         int ret;
10339
10340         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10341         if (ret) {
10342                 dev_err(&hdev->pdev->dev,
10343                         "Get register number failed, ret = %d.\n", ret);
10344                 return ret;
10345         }
10346
10347         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10348         if (ret) {
10349                 dev_err(&hdev->pdev->dev,
10350                         "Get dfx reg len failed, ret = %d.\n", ret);
10351                 return ret;
10352         }
10353
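        /* Each register group contributes a whole number of dump lines plus
         * one separator line; the total length is returned in bytes.
         */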
10354         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10355                 REG_SEPARATOR_LINE;
10356         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10357                 REG_SEPARATOR_LINE;
10358         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10359                 REG_SEPARATOR_LINE;
10360         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10361                 REG_SEPARATOR_LINE;
10362         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10363                 REG_SEPARATOR_LINE;
10364         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10365                 REG_SEPARATOR_LINE;
10366
10367         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10368                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10369                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10370 }
10371
10372 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10373                            void *data)
10374 {
10375         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10376         struct hclge_vport *vport = hclge_get_vport(handle);
10377         struct hclge_dev *hdev = vport->back;
10378         u32 regs_num_32_bit, regs_num_64_bit;
10379         int i, reg_num, separator_num, ret;
10380         u32 *reg = data;
10381
10382         *version = hdev->fw_version;
10383
10384         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10385         if (ret) {
10386                 dev_err(&hdev->pdev->dev,
10387                         "Get register number failed, ret = %d.\n", ret);
10388                 return;
10389         }
10390
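        /* Emit the groups in the same order assumed by hclge_get_regs_len():
         * PF PCIe registers, 32-bit registers, 64-bit registers and finally
         * the DFX registers, each padded with separator values.
         */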
10391         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10392
10393         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10394         if (ret) {
10395                 dev_err(&hdev->pdev->dev,
10396                         "Get 32 bit register failed, ret = %d.\n", ret);
10397                 return;
10398         }
10399         reg_num = regs_num_32_bit;
10400         reg += reg_num;
10401         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10402         for (i = 0; i < separator_num; i++)
10403                 *reg++ = SEPARATOR_VALUE;
10404
10405         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10406         if (ret) {
10407                 dev_err(&hdev->pdev->dev,
10408                         "Get 64 bit register failed, ret = %d.\n", ret);
10409                 return;
10410         }
10411         reg_num = regs_num_64_bit * 2;
10412         reg += reg_num;
10413         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10414         for (i = 0; i < separator_num; i++)
10415                 *reg++ = SEPARATOR_VALUE;
10416
10417         ret = hclge_get_dfx_reg(hdev, reg);
10418         if (ret)
10419                 dev_err(&hdev->pdev->dev,
10420                         "Get dfx register failed, ret = %d.\n", ret);
10421 }
10422
10423 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10424 {
10425         struct hclge_set_led_state_cmd *req;
10426         struct hclge_desc desc;
10427         int ret;
10428
10429         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10430
10431         req = (struct hclge_set_led_state_cmd *)desc.data;
10432         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10433                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10434
10435         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10436         if (ret)
10437                 dev_err(&hdev->pdev->dev,
10438                         "Send set led state cmd error, ret = %d\n", ret);
10439
10440         return ret;
10441 }
10442
10443 enum hclge_led_status {
10444         HCLGE_LED_OFF,
10445         HCLGE_LED_ON,
10446         HCLGE_LED_NO_CHANGE = 0xFF,
10447 };
10448
10449 static int hclge_set_led_id(struct hnae3_handle *handle,
10450                             enum ethtool_phys_id_state status)
10451 {
10452         struct hclge_vport *vport = hclge_get_vport(handle);
10453         struct hclge_dev *hdev = vport->back;
10454
10455         switch (status) {
10456         case ETHTOOL_ID_ACTIVE:
10457                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10458         case ETHTOOL_ID_INACTIVE:
10459                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10460         default:
10461                 return -EINVAL;
10462         }
10463 }
10464
10465 static void hclge_get_link_mode(struct hnae3_handle *handle,
10466                                 unsigned long *supported,
10467                                 unsigned long *advertising)
10468 {
10469         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10470         struct hclge_vport *vport = hclge_get_vport(handle);
10471         struct hclge_dev *hdev = vport->back;
10472         unsigned int idx = 0;
10473
10474         for (; idx < size; idx++) {
10475                 supported[idx] = hdev->hw.mac.supported[idx];
10476                 advertising[idx] = hdev->hw.mac.advertising[idx];
10477         }
10478 }
10479
10480 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10481 {
10482         struct hclge_vport *vport = hclge_get_vport(handle);
10483         struct hclge_dev *hdev = vport->back;
10484
10485         return hclge_config_gro(hdev, enable);
10486 }
10487
10488 static const struct hnae3_ae_ops hclge_ops = {
10489         .init_ae_dev = hclge_init_ae_dev,
10490         .uninit_ae_dev = hclge_uninit_ae_dev,
10491         .flr_prepare = hclge_flr_prepare,
10492         .flr_done = hclge_flr_done,
10493         .init_client_instance = hclge_init_client_instance,
10494         .uninit_client_instance = hclge_uninit_client_instance,
10495         .map_ring_to_vector = hclge_map_ring_to_vector,
10496         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10497         .get_vector = hclge_get_vector,
10498         .put_vector = hclge_put_vector,
10499         .set_promisc_mode = hclge_set_promisc_mode,
10500         .set_loopback = hclge_set_loopback,
10501         .start = hclge_ae_start,
10502         .stop = hclge_ae_stop,
10503         .client_start = hclge_client_start,
10504         .client_stop = hclge_client_stop,
10505         .get_status = hclge_get_status,
10506         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10507         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10508         .get_media_type = hclge_get_media_type,
10509         .check_port_speed = hclge_check_port_speed,
10510         .get_fec = hclge_get_fec,
10511         .set_fec = hclge_set_fec,
10512         .get_rss_key_size = hclge_get_rss_key_size,
10513         .get_rss_indir_size = hclge_get_rss_indir_size,
10514         .get_rss = hclge_get_rss,
10515         .set_rss = hclge_set_rss,
10516         .set_rss_tuple = hclge_set_rss_tuple,
10517         .get_rss_tuple = hclge_get_rss_tuple,
10518         .get_tc_size = hclge_get_tc_size,
10519         .get_mac_addr = hclge_get_mac_addr,
10520         .set_mac_addr = hclge_set_mac_addr,
10521         .do_ioctl = hclge_do_ioctl,
10522         .add_uc_addr = hclge_add_uc_addr,
10523         .rm_uc_addr = hclge_rm_uc_addr,
10524         .add_mc_addr = hclge_add_mc_addr,
10525         .rm_mc_addr = hclge_rm_mc_addr,
10526         .set_autoneg = hclge_set_autoneg,
10527         .get_autoneg = hclge_get_autoneg,
10528         .restart_autoneg = hclge_restart_autoneg,
10529         .halt_autoneg = hclge_halt_autoneg,
10530         .get_pauseparam = hclge_get_pauseparam,
10531         .set_pauseparam = hclge_set_pauseparam,
10532         .set_mtu = hclge_set_mtu,
10533         .reset_queue = hclge_reset_tqp,
10534         .get_stats = hclge_get_stats,
10535         .get_mac_stats = hclge_get_mac_stat,
10536         .update_stats = hclge_update_stats,
10537         .get_strings = hclge_get_strings,
10538         .get_sset_count = hclge_get_sset_count,
10539         .get_fw_version = hclge_get_fw_version,
10540         .get_mdix_mode = hclge_get_mdix_mode,
10541         .enable_vlan_filter = hclge_enable_vlan_filter,
10542         .set_vlan_filter = hclge_set_vlan_filter,
10543         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10544         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10545         .reset_event = hclge_reset_event,
10546         .get_reset_level = hclge_get_reset_level,
10547         .set_default_reset_request = hclge_set_def_reset_request,
10548         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10549         .set_channels = hclge_set_channels,
10550         .get_channels = hclge_get_channels,
10551         .get_regs_len = hclge_get_regs_len,
10552         .get_regs = hclge_get_regs,
10553         .set_led_id = hclge_set_led_id,
10554         .get_link_mode = hclge_get_link_mode,
10555         .add_fd_entry = hclge_add_fd_entry,
10556         .del_fd_entry = hclge_del_fd_entry,
10557         .del_all_fd_entries = hclge_del_all_fd_entries,
10558         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10559         .get_fd_rule_info = hclge_get_fd_rule_info,
10560         .get_fd_all_rules = hclge_get_all_rules,
10561         .restore_fd_rules = hclge_restore_fd_entries,
10562         .enable_fd = hclge_enable_fd,
10563         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10564         .dbg_run_cmd = hclge_dbg_run_cmd,
10565         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10566         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10567         .ae_dev_resetting = hclge_ae_dev_resetting,
10568         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10569         .set_gro_en = hclge_gro_en,
10570         .get_global_queue_id = hclge_covert_handle_qid_global,
10571         .set_timer_task = hclge_set_timer_task,
10572         .mac_connect_phy = hclge_mac_connect_phy,
10573         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10574         .restore_vlan_table = hclge_restore_vlan_table,
10575         .get_vf_config = hclge_get_vf_config,
10576         .set_vf_link_state = hclge_set_vf_link_state,
10577         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10578         .set_vf_trust = hclge_set_vf_trust,
10579         .set_vf_rate = hclge_set_vf_rate,
10580         .set_vf_mac = hclge_set_vf_mac,
10581 };
10582
10583 static struct hnae3_ae_algo ae_algo = {
10584         .ops = &hclge_ops,
10585         .pdev_id_table = ae_algo_pci_tbl,
10586 };
10587
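/* Register the PF algorithm with the hnae3 framework; the framework matches
 * supported devices against ae_algo_pci_tbl and calls back through hclge_ops.
 */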
10588 static int hclge_init(void)
10589 {
10590         pr_info("%s is initializing\n", HCLGE_NAME);
10591
10592         hnae3_register_ae_algo(&ae_algo);
10593
10594         return 0;
10595 }
10596
10597 static void hclge_exit(void)
10598 {
10599         hnae3_unregister_ae_algo(&ae_algo);
10600 }
10601 module_init(hclge_init);
10602 module_exit(hclge_exit);
10603
10604 MODULE_LICENSE("GPL");
10605 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10606 MODULE_DESCRIPTION("HCLGE Driver");
10607 MODULE_VERSION(HCLGE_MOD_VERSION);