1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
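/* Helpers for the MAC statistics tables below: read a 64-bit counter at a
 * byte offset inside a stats structure, and compute that offset from a
 * struct hclge_mac_stats field name.
 */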
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
57 static int hclge_init_vlan_config(struct hclge_dev *hdev);
58 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
59 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
60 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
61 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
62                                u16 *allocated_size, bool is_alloc);
63 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
64 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
65 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
66                                                    unsigned long *addr);
67
68 static struct hnae3_ae_algo ae_algo;
69
70 static const struct pci_device_id ae_algo_pci_tbl[] = {
71         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
72         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
73         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
74         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
75         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
76         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
77         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
78         /* required last entry */
79         {0, }
80 };
81
82 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
83
84 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
85                                          HCLGE_CMDQ_TX_ADDR_H_REG,
86                                          HCLGE_CMDQ_TX_DEPTH_REG,
87                                          HCLGE_CMDQ_TX_TAIL_REG,
88                                          HCLGE_CMDQ_TX_HEAD_REG,
89                                          HCLGE_CMDQ_RX_ADDR_L_REG,
90                                          HCLGE_CMDQ_RX_ADDR_H_REG,
91                                          HCLGE_CMDQ_RX_DEPTH_REG,
92                                          HCLGE_CMDQ_RX_TAIL_REG,
93                                          HCLGE_CMDQ_RX_HEAD_REG,
94                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
95                                          HCLGE_CMDQ_INTR_STS_REG,
96                                          HCLGE_CMDQ_INTR_EN_REG,
97                                          HCLGE_CMDQ_INTR_GEN_REG};
98
99 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
100                                            HCLGE_VECTOR0_OTER_EN_REG,
101                                            HCLGE_MISC_RESET_STS_REG,
102                                            HCLGE_MISC_VECTOR_INT_STS,
103                                            HCLGE_GLOBAL_RESET_REG,
104                                            HCLGE_FUN_RST_ING,
105                                            HCLGE_GRO_EN_REG};
106
107 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
108                                          HCLGE_RING_RX_ADDR_H_REG,
109                                          HCLGE_RING_RX_BD_NUM_REG,
110                                          HCLGE_RING_RX_BD_LENGTH_REG,
111                                          HCLGE_RING_RX_MERGE_EN_REG,
112                                          HCLGE_RING_RX_TAIL_REG,
113                                          HCLGE_RING_RX_HEAD_REG,
114                                          HCLGE_RING_RX_FBD_NUM_REG,
115                                          HCLGE_RING_RX_OFFSET_REG,
116                                          HCLGE_RING_RX_FBD_OFFSET_REG,
117                                          HCLGE_RING_RX_STASH_REG,
118                                          HCLGE_RING_RX_BD_ERR_REG,
119                                          HCLGE_RING_TX_ADDR_L_REG,
120                                          HCLGE_RING_TX_ADDR_H_REG,
121                                          HCLGE_RING_TX_BD_NUM_REG,
122                                          HCLGE_RING_TX_PRIORITY_REG,
123                                          HCLGE_RING_TX_TC_REG,
124                                          HCLGE_RING_TX_MERGE_EN_REG,
125                                          HCLGE_RING_TX_TAIL_REG,
126                                          HCLGE_RING_TX_HEAD_REG,
127                                          HCLGE_RING_TX_FBD_NUM_REG,
128                                          HCLGE_RING_TX_OFFSET_REG,
129                                          HCLGE_RING_TX_EBD_NUM_REG,
130                                          HCLGE_RING_TX_EBD_OFFSET_REG,
131                                          HCLGE_RING_TX_BD_ERR_REG,
132                                          HCLGE_RING_EN_REG};
133
134 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
135                                              HCLGE_TQP_INTR_GL0_REG,
136                                              HCLGE_TQP_INTR_GL1_REG,
137                                              HCLGE_TQP_INTR_GL2_REG,
138                                              HCLGE_TQP_INTR_RL_REG};
139
140 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
141         "App    Loopback test",
142         "Serdes serial Loopback test",
143         "Serdes parallel Loopback test",
144         "Phy    Loopback test"
145 };
146
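/* ethtool string / struct hclge_mac_stats offset pairs for every MAC
 * counter exported by the PF driver.
 */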
147 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
148         {"mac_tx_mac_pause_num",
149                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
150         {"mac_rx_mac_pause_num",
151                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
152         {"mac_tx_control_pkt_num",
153                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
154         {"mac_rx_control_pkt_num",
155                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
156         {"mac_tx_pfc_pkt_num",
157                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
158         {"mac_tx_pfc_pri0_pkt_num",
159                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
160         {"mac_tx_pfc_pri1_pkt_num",
161                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
162         {"mac_tx_pfc_pri2_pkt_num",
163                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
164         {"mac_tx_pfc_pri3_pkt_num",
165                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
166         {"mac_tx_pfc_pri4_pkt_num",
167                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
168         {"mac_tx_pfc_pri5_pkt_num",
169                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
170         {"mac_tx_pfc_pri6_pkt_num",
171                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
172         {"mac_tx_pfc_pri7_pkt_num",
173                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
174         {"mac_rx_pfc_pkt_num",
175                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
176         {"mac_rx_pfc_pri0_pkt_num",
177                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
178         {"mac_rx_pfc_pri1_pkt_num",
179                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
180         {"mac_rx_pfc_pri2_pkt_num",
181                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
182         {"mac_rx_pfc_pri3_pkt_num",
183                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
184         {"mac_rx_pfc_pri4_pkt_num",
185                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
186         {"mac_rx_pfc_pri5_pkt_num",
187                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
188         {"mac_rx_pfc_pri6_pkt_num",
189                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
190         {"mac_rx_pfc_pri7_pkt_num",
191                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
192         {"mac_tx_total_pkt_num",
193                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
194         {"mac_tx_total_oct_num",
195                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
196         {"mac_tx_good_pkt_num",
197                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
198         {"mac_tx_bad_pkt_num",
199                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
200         {"mac_tx_good_oct_num",
201                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
202         {"mac_tx_bad_oct_num",
203                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
204         {"mac_tx_uni_pkt_num",
205                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
206         {"mac_tx_multi_pkt_num",
207                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
208         {"mac_tx_broad_pkt_num",
209                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
210         {"mac_tx_undersize_pkt_num",
211                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
212         {"mac_tx_oversize_pkt_num",
213                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
214         {"mac_tx_64_oct_pkt_num",
215                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
216         {"mac_tx_65_127_oct_pkt_num",
217                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
218         {"mac_tx_128_255_oct_pkt_num",
219                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
220         {"mac_tx_256_511_oct_pkt_num",
221                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
222         {"mac_tx_512_1023_oct_pkt_num",
223                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
224         {"mac_tx_1024_1518_oct_pkt_num",
225                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
226         {"mac_tx_1519_2047_oct_pkt_num",
227                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
228         {"mac_tx_2048_4095_oct_pkt_num",
229                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
230         {"mac_tx_4096_8191_oct_pkt_num",
231                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
232         {"mac_tx_8192_9216_oct_pkt_num",
233                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
234         {"mac_tx_9217_12287_oct_pkt_num",
235                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
236         {"mac_tx_12288_16383_oct_pkt_num",
237                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
238         {"mac_tx_1519_max_good_pkt_num",
239                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
240         {"mac_tx_1519_max_bad_pkt_num",
241                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
242         {"mac_rx_total_pkt_num",
243                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
244         {"mac_rx_total_oct_num",
245                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
246         {"mac_rx_good_pkt_num",
247                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
248         {"mac_rx_bad_pkt_num",
249                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
250         {"mac_rx_good_oct_num",
251                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
252         {"mac_rx_bad_oct_num",
253                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
254         {"mac_rx_uni_pkt_num",
255                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
256         {"mac_rx_multi_pkt_num",
257                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
258         {"mac_rx_broad_pkt_num",
259                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
260         {"mac_rx_undersize_pkt_num",
261                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
262         {"mac_rx_oversize_pkt_num",
263                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
264         {"mac_rx_64_oct_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
266         {"mac_rx_65_127_oct_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
268         {"mac_rx_128_255_oct_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
270         {"mac_rx_256_511_oct_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
272         {"mac_rx_512_1023_oct_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
274         {"mac_rx_1024_1518_oct_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
276         {"mac_rx_1519_2047_oct_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
278         {"mac_rx_2048_4095_oct_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
280         {"mac_rx_4096_8191_oct_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
282         {"mac_rx_8192_9216_oct_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
284         {"mac_rx_9217_12287_oct_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
286         {"mac_rx_12288_16383_oct_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
288         {"mac_rx_1519_max_good_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
290         {"mac_rx_1519_max_bad_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
292
293         {"mac_tx_fragment_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
295         {"mac_tx_undermin_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
297         {"mac_tx_jabber_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
299         {"mac_tx_err_all_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
301         {"mac_tx_from_app_good_pkt_num",
302                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
303         {"mac_tx_from_app_bad_pkt_num",
304                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
305         {"mac_rx_fragment_pkt_num",
306                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
307         {"mac_rx_undermin_pkt_num",
308                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
309         {"mac_rx_jabber_pkt_num",
310                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
311         {"mac_rx_fcs_err_pkt_num",
312                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
313         {"mac_rx_send_app_good_pkt_num",
314                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
315         {"mac_rx_send_app_bad_pkt_num",
316                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
317 };
318
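/* MAC manager table entry matching LLDP frames addressed to
 * 01:80:c2:00:00:0e (the fields below encode that address and ethertype).
 */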
319 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
320         {
321                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
322                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
323                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
324                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
325                 .i_port_bitmap = 0x1,
326         },
327 };
328
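/* Default RSS hash key (the widely used 40-byte default Toeplitz key) */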
329 static const u8 hclge_hash_key[] = {
330         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
331         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
332         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
333         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
334         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
335 };
336
337 static const u32 hclge_dfx_bd_offset_list[] = {
338         HCLGE_DFX_BIOS_BD_OFFSET,
339         HCLGE_DFX_SSU_0_BD_OFFSET,
340         HCLGE_DFX_SSU_1_BD_OFFSET,
341         HCLGE_DFX_IGU_BD_OFFSET,
342         HCLGE_DFX_RPU_0_BD_OFFSET,
343         HCLGE_DFX_RPU_1_BD_OFFSET,
344         HCLGE_DFX_NCSI_BD_OFFSET,
345         HCLGE_DFX_RTC_BD_OFFSET,
346         HCLGE_DFX_PPP_BD_OFFSET,
347         HCLGE_DFX_RCB_BD_OFFSET,
348         HCLGE_DFX_TQP_BD_OFFSET,
349         HCLGE_DFX_SSU_2_BD_OFFSET
350 };
351
352 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
353         HCLGE_OPC_DFX_BIOS_COMMON_REG,
354         HCLGE_OPC_DFX_SSU_REG_0,
355         HCLGE_OPC_DFX_SSU_REG_1,
356         HCLGE_OPC_DFX_IGU_EGU_REG,
357         HCLGE_OPC_DFX_RPU_REG_0,
358         HCLGE_OPC_DFX_RPU_REG_1,
359         HCLGE_OPC_DFX_NCSI_REG,
360         HCLGE_OPC_DFX_RTC_REG,
361         HCLGE_OPC_DFX_PPP_REG,
362         HCLGE_OPC_DFX_RCB_REG,
363         HCLGE_OPC_DFX_TQP_REG,
364         HCLGE_OPC_DFX_SSU_REG_2
365 };
366
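/* Legacy MAC statistics read: issue HCLGE_OPC_STATS_MAC over a fixed chain
 * of HCLGE_MAC_CMD_NUM descriptors and accumulate every 64-bit counter into
 * hdev->hw_stats.mac_stats.
 */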
367 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
368 {
369 #define HCLGE_MAC_CMD_NUM 21
370
371         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
372         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
373         __le64 *desc_data;
374         int i, k, n;
375         int ret;
376
377         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
378         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
379         if (ret) {
380                 dev_err(&hdev->pdev->dev,
381                         "Get MAC pkt stats fail, status = %d.\n", ret);
382
383                 return ret;
384         }
385
386         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
387                 /* for special opcode 0032, only the first desc has the head */
388                 if (unlikely(i == 0)) {
389                         desc_data = (__le64 *)(&desc[i].data[0]);
390                         n = HCLGE_RD_FIRST_STATS_NUM;
391                 } else {
392                         desc_data = (__le64 *)(&desc[i]);
393                         n = HCLGE_RD_OTHER_STATS_NUM;
394                 }
395
396                 for (k = 0; k < n; k++) {
397                         *data += le64_to_cpu(*desc_data);
398                         data++;
399                         desc_data++;
400                 }
401         }
402
403         return 0;
404 }
405
406 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
407 {
408         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
409         struct hclge_desc *desc;
410         __le64 *desc_data;
411         u16 i, k, n;
412         int ret;
413
414         /* This may be called inside atomic sections,
415          * so GFP_ATOMIC is more suitable here
416          */
417         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
418         if (!desc)
419                 return -ENOMEM;
420
421         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
422         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
423         if (ret) {
424                 kfree(desc);
425                 return ret;
426         }
427
428         for (i = 0; i < desc_num; i++) {
429                 /* for special opcode 0034, only the first desc has the head */
430                 if (i == 0) {
431                         desc_data = (__le64 *)(&desc[i].data[0]);
432                         n = HCLGE_RD_FIRST_STATS_NUM;
433                 } else {
434                         desc_data = (__le64 *)(&desc[i]);
435                         n = HCLGE_RD_OTHER_STATS_NUM;
436                 }
437
438                 for (k = 0; k < n; k++) {
439                         *data += le64_to_cpu(*desc_data);
440                         data++;
441                         desc_data++;
442                 }
443         }
444
445         kfree(desc);
446
447         return 0;
448 }
449
450 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
451 {
452         struct hclge_desc desc;
453         __le32 *desc_data;
454         u32 reg_num;
455         int ret;
456
457         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
458         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
459         if (ret)
460                 return ret;
461
462         desc_data = (__le32 *)(&desc.data[0]);
463         reg_num = le32_to_cpu(*desc_data);
464
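        /* The first descriptor carries three stats registers and each
         * following descriptor carries four, so round the remainder up.
         */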
465         *desc_num = 1 + ((reg_num - 3) >> 2) +
466                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
467
468         return 0;
469 }
470
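/* Prefer the "complete" statistics command when the firmware can report the
 * descriptor count it needs; fall back to the legacy fixed-length command
 * when that query returns -EOPNOTSUPP.
 */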
471 static int hclge_mac_update_stats(struct hclge_dev *hdev)
472 {
473         u32 desc_num;
474         int ret;
475
476         ret = hclge_mac_query_reg_num(hdev, &desc_num);
477
478         /* The firmware supports the new statistics acquisition method */
479         if (!ret)
480                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
481         else if (ret == -EOPNOTSUPP)
482                 ret = hclge_mac_update_stats_defective(hdev);
483         else
484                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
485
486         return ret;
487 }
488
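/* Read the per-queue RX and TX packet counters for every TQP of this handle
 * and accumulate them into the software TQP stats.
 */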
489 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
490 {
491         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
492         struct hclge_vport *vport = hclge_get_vport(handle);
493         struct hclge_dev *hdev = vport->back;
494         struct hnae3_queue *queue;
495         struct hclge_desc desc[1];
496         struct hclge_tqp *tqp;
497         int ret, i;
498
499         for (i = 0; i < kinfo->num_tqps; i++) {
500                 queue = handle->kinfo.tqp[i];
501                 tqp = container_of(queue, struct hclge_tqp, q);
502                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
503                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
504                                            true);
505
506                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
507                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
508                 if (ret) {
509                         dev_err(&hdev->pdev->dev,
510                                 "Query tqp stat fail, status = %d,queue = %d\n",
511                                 ret, i);
512                         return ret;
513                 }
514                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
515                         le32_to_cpu(desc[0].data[1]);
516         }
517
518         for (i = 0; i < kinfo->num_tqps; i++) {
519                 queue = handle->kinfo.tqp[i];
520                 tqp = container_of(queue, struct hclge_tqp, q);
521                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
522                 hclge_cmd_setup_basic_desc(&desc[0],
523                                            HCLGE_OPC_QUERY_TX_STATUS,
524                                            true);
525
526                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
527                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
528                 if (ret) {
529                         dev_err(&hdev->pdev->dev,
530                                 "Query tqp stat fail, status = %d,queue = %d\n",
531                                 ret, i);
532                         return ret;
533                 }
534                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
535                         le32_to_cpu(desc[0].data[1]);
536         }
537
538         return 0;
539 }
540
541 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
542 {
543         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544         struct hclge_tqp *tqp;
545         u64 *buff = data;
546         int i;
547
548         for (i = 0; i < kinfo->num_tqps; i++) {
549                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
550                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
551         }
552
553         for (i = 0; i < kinfo->num_tqps; i++) {
554                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
555                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
556         }
557
558         return buff;
559 }
560
561 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
562 {
563         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
564
565         /* each tqp has both a TX and an RX queue */
566         return kinfo->num_tqps * (2);
567 }
568
569 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
570 {
571         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
572         u8 *buff = data;
573         int i = 0;
574
575         for (i = 0; i < kinfo->num_tqps; i++) {
576                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
577                         struct hclge_tqp, q);
578                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
579                          tqp->index);
580                 buff = buff + ETH_GSTRING_LEN;
581         }
582
583         for (i = 0; i < kinfo->num_tqps; i++) {
584                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
585                         struct hclge_tqp, q);
586                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
587                          tqp->index);
588                 buff = buff + ETH_GSTRING_LEN;
589         }
590
591         return buff;
592 }
593
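/* Copy the counters described by @strs out of @comm_stats into the ethtool
 * data buffer and return the position just past the last entry written.
 */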
594 static u64 *hclge_comm_get_stats(const void *comm_stats,
595                                  const struct hclge_comm_stats_str strs[],
596                                  int size, u64 *data)
597 {
598         u64 *buf = data;
599         u32 i;
600
601         for (i = 0; i < size; i++)
602                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
603
604         return buf + size;
605 }
606
607 static u8 *hclge_comm_get_strings(u32 stringset,
608                                   const struct hclge_comm_stats_str strs[],
609                                   int size, u8 *data)
610 {
611         char *buff = (char *)data;
612         u32 i;
613
614         if (stringset != ETH_SS_STATS)
615                 return buff;
616
617         for (i = 0; i < size; i++) {
618                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
619                 buff = buff + ETH_GSTRING_LEN;
620         }
621
622         return (u8 *)buff;
623 }
624
625 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
626 {
627         struct hnae3_handle *handle;
628         int status;
629
630         handle = &hdev->vport[0].nic;
631         if (handle->client) {
632                 status = hclge_tqps_update_stats(handle);
633                 if (status) {
634                         dev_err(&hdev->pdev->dev,
635                                 "Update TQPS stats fail, status = %d.\n",
636                                 status);
637                 }
638         }
639
640         status = hclge_mac_update_stats(hdev);
641         if (status)
642                 dev_err(&hdev->pdev->dev,
643                         "Update MAC stats fail, status = %d.\n", status);
644 }
645
646 static void hclge_update_stats(struct hnae3_handle *handle,
647                                struct net_device_stats *net_stats)
648 {
649         struct hclge_vport *vport = hclge_get_vport(handle);
650         struct hclge_dev *hdev = vport->back;
651         int status;
652
653         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
654                 return;
655
656         status = hclge_mac_update_stats(hdev);
657         if (status)
658                 dev_err(&hdev->pdev->dev,
659                         "Update MAC stats fail, status = %d.\n",
660                         status);
661
662         status = hclge_tqps_update_stats(handle);
663         if (status)
664                 dev_err(&hdev->pdev->dev,
665                         "Update TQPS stats fail, status = %d.\n",
666                         status);
667
668         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
669 }
670
671 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
672 {
673 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
674                 HNAE3_SUPPORT_PHY_LOOPBACK |\
675                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
676                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
677
678         struct hclge_vport *vport = hclge_get_vport(handle);
679         struct hclge_dev *hdev = vport->back;
680         int count = 0;
681
682         /* Loopback test support rules:
683          * mac: only supported in GE mode
684          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
685          * phy: only supported when a phy device exists on the board
686          */
687         if (stringset == ETH_SS_TEST) {
688                 /* clear loopback bit flags at first */
689                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
690                 if (hdev->pdev->revision >= 0x21 ||
691                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
692                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
693                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
694                         count += 1;
695                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
696                 }
697
698                 count += 2;
699                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
700                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
701         } else if (stringset == ETH_SS_STATS) {
702                 count = ARRAY_SIZE(g_mac_stats_string) +
703                         hclge_tqps_get_sset_count(handle, stringset);
704         }
705
706         return count;
707 }
708
709 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
710                               u8 *data)
711 {
712         u8 *p = (char *)data;
713         int size;
714
715         if (stringset == ETH_SS_STATS) {
716                 size = ARRAY_SIZE(g_mac_stats_string);
717                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
718                                            size, p);
719                 p = hclge_tqps_get_strings(handle, p);
720         } else if (stringset == ETH_SS_TEST) {
721                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
722                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
723                                ETH_GSTRING_LEN);
724                         p += ETH_GSTRING_LEN;
725                 }
726                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
727                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
728                                ETH_GSTRING_LEN);
729                         p += ETH_GSTRING_LEN;
730                 }
731                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
732                         memcpy(p,
733                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
734                                ETH_GSTRING_LEN);
735                         p += ETH_GSTRING_LEN;
736                 }
737                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
738                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
739                                ETH_GSTRING_LEN);
740                         p += ETH_GSTRING_LEN;
741                 }
742         }
743 }
744
745 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
746 {
747         struct hclge_vport *vport = hclge_get_vport(handle);
748         struct hclge_dev *hdev = vport->back;
749         u64 *p;
750
751         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
752                                  ARRAY_SIZE(g_mac_stats_string), data);
753         p = hclge_tqps_get_stats(handle, p);
754 }
755
756 static void hclge_get_mac_stat(struct hnae3_handle *handle,
757                                struct hns3_mac_stats *mac_stats)
758 {
759         struct hclge_vport *vport = hclge_get_vport(handle);
760         struct hclge_dev *hdev = vport->back;
761
762         hclge_update_stats(handle, NULL);
763
764         mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
765         mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
766 }
767
768 static int hclge_parse_func_status(struct hclge_dev *hdev,
769                                    struct hclge_func_status_cmd *status)
770 {
771         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
772                 return -EINVAL;
773
774         /* Record whether this pf is the main pf */
775         if (status->pf_state & HCLGE_PF_STATE_MAIN)
776                 hdev->flag |= HCLGE_FLAG_MAIN;
777         else
778                 hdev->flag &= ~HCLGE_FLAG_MAIN;
779
780         return 0;
781 }
782
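/* Poll HCLGE_OPC_QUERY_FUNC_STATUS until the PF state is reported or
 * HCLGE_QUERY_MAX_CNT attempts have been made, then record whether this PF
 * is the main PF.
 */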
783 static int hclge_query_function_status(struct hclge_dev *hdev)
784 {
785 #define HCLGE_QUERY_MAX_CNT     5
786
787         struct hclge_func_status_cmd *req;
788         struct hclge_desc desc;
789         int timeout = 0;
790         int ret;
791
792         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
793         req = (struct hclge_func_status_cmd *)desc.data;
794
795         do {
796                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
797                 if (ret) {
798                         dev_err(&hdev->pdev->dev,
799                                 "query function status failed %d.\n", ret);
800                         return ret;
801                 }
802
803                 /* Check pf reset is done */
804                 if (req->pf_state)
805                         break;
806                 usleep_range(1000, 2000);
807         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
808
809         ret = hclge_parse_func_status(hdev, req);
810
811         return ret;
812 }
813
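/* Query the PF resource descriptor and derive the TQP count, packet/TX/DV
 * buffer sizes and the MSI-X vector layout (NIC vectors first, then RoCE
 * vectors when RoCE is supported).
 */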
814 static int hclge_query_pf_resource(struct hclge_dev *hdev)
815 {
816         struct hclge_pf_res_cmd *req;
817         struct hclge_desc desc;
818         int ret;
819
820         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
821         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
822         if (ret) {
823                 dev_err(&hdev->pdev->dev,
824                         "query pf resource failed %d.\n", ret);
825                 return ret;
826         }
827
828         req = (struct hclge_pf_res_cmd *)desc.data;
829         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
830         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
831
832         if (req->tx_buf_size)
833                 hdev->tx_buf_size =
834                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
835         else
836                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
837
838         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
839
840         if (req->dv_buf_size)
841                 hdev->dv_buf_size =
842                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
843         else
844                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
845
846         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
847
848         if (hnae3_dev_roce_supported(hdev)) {
849                 hdev->roce_base_msix_offset =
850                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
851                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
852                 hdev->num_roce_msi =
853                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
854                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
855
856                 /* PF should have both NIC vectors and RoCE vectors;
857                  * NIC vectors are queued before RoCE vectors.
858                  */
859                 hdev->num_msi = hdev->num_roce_msi +
860                                 hdev->roce_base_msix_offset;
861         } else {
862                 hdev->num_msi =
863                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
864                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
865         }
866
867         return 0;
868 }
869
870 static int hclge_parse_speed(int speed_cmd, int *speed)
871 {
872         switch (speed_cmd) {
873         case 6:
874                 *speed = HCLGE_MAC_SPEED_10M;
875                 break;
876         case 7:
877                 *speed = HCLGE_MAC_SPEED_100M;
878                 break;
879         case 0:
880                 *speed = HCLGE_MAC_SPEED_1G;
881                 break;
882         case 1:
883                 *speed = HCLGE_MAC_SPEED_10G;
884                 break;
885         case 2:
886                 *speed = HCLGE_MAC_SPEED_25G;
887                 break;
888         case 3:
889                 *speed = HCLGE_MAC_SPEED_40G;
890                 break;
891         case 4:
892                 *speed = HCLGE_MAC_SPEED_50G;
893                 break;
894         case 5:
895                 *speed = HCLGE_MAC_SPEED_100G;
896                 break;
897         default:
898                 return -EINVAL;
899         }
900
901         return 0;
902 }
903
904 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
905 {
906         struct hclge_vport *vport = hclge_get_vport(handle);
907         struct hclge_dev *hdev = vport->back;
908         u32 speed_ability = hdev->hw.mac.speed_ability;
909         u32 speed_bit = 0;
910
911         switch (speed) {
912         case HCLGE_MAC_SPEED_10M:
913                 speed_bit = HCLGE_SUPPORT_10M_BIT;
914                 break;
915         case HCLGE_MAC_SPEED_100M:
916                 speed_bit = HCLGE_SUPPORT_100M_BIT;
917                 break;
918         case HCLGE_MAC_SPEED_1G:
919                 speed_bit = HCLGE_SUPPORT_1G_BIT;
920                 break;
921         case HCLGE_MAC_SPEED_10G:
922                 speed_bit = HCLGE_SUPPORT_10G_BIT;
923                 break;
924         case HCLGE_MAC_SPEED_25G:
925                 speed_bit = HCLGE_SUPPORT_25G_BIT;
926                 break;
927         case HCLGE_MAC_SPEED_40G:
928                 speed_bit = HCLGE_SUPPORT_40G_BIT;
929                 break;
930         case HCLGE_MAC_SPEED_50G:
931                 speed_bit = HCLGE_SUPPORT_50G_BIT;
932                 break;
933         case HCLGE_MAC_SPEED_100G:
934                 speed_bit = HCLGE_SUPPORT_100G_BIT;
935                 break;
936         default:
937                 return -EINVAL;
938         }
939
940         if (speed_bit & speed_ability)
941                 return 0;
942
943         return -EINVAL;
944 }
945
946 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
947 {
948         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
949                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
950                                  mac->supported);
951         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
962                                  mac->supported);
963 }
964
965 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
966 {
967         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
968                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
969                                  mac->supported);
970         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
971                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
972                                  mac->supported);
973         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
974                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
975                                  mac->supported);
976         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
977                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
978                                  mac->supported);
979         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
980                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
981                                  mac->supported);
982 }
983
984 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
985 {
986         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
987                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
988                                  mac->supported);
989         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
990                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
991                                  mac->supported);
992         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
994                                  mac->supported);
995         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
996                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
997                                  mac->supported);
998         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
999                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1000                                  mac->supported);
1001 }
1002
1003 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1004 {
1005         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1006                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1007                                  mac->supported);
1008         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1009                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1010                                  mac->supported);
1011         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1012                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1013                                  mac->supported);
1014         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1015                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1016                                  mac->supported);
1017         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1022                                  mac->supported);
1023 }
1024
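/* Record the FEC modes usable at the current MAC speed: BASE-R for 10G/40G,
 * BASE-R or RS for 25G/50G (RS is what gets advertised), and RS only for
 * 100G; other speeds get no FEC ability.
 */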
1025 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1026 {
1027         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1028         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1029
1030         switch (mac->speed) {
1031         case HCLGE_MAC_SPEED_10G:
1032         case HCLGE_MAC_SPEED_40G:
1033                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1034                                  mac->supported);
1035                 mac->fec_ability =
1036                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1037                 break;
1038         case HCLGE_MAC_SPEED_25G:
1039         case HCLGE_MAC_SPEED_50G:
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1041                                  mac->supported);
1042                 mac->fec_ability =
1043                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1044                         BIT(HNAE3_FEC_AUTO);
1045                 break;
1046         case HCLGE_MAC_SPEED_100G:
1047                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1048                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1049                 break;
1050         default:
1051                 mac->fec_ability = 0;
1052                 break;
1053         }
1054 }
1055
1056 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1057                                         u8 speed_ability)
1058 {
1059         struct hclge_mac *mac = &hdev->hw.mac;
1060
1061         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1063                                  mac->supported);
1064
1065         hclge_convert_setting_sr(mac, speed_ability);
1066         hclge_convert_setting_lr(mac, speed_ability);
1067         hclge_convert_setting_cr(mac, speed_ability);
1068         if (hdev->pdev->revision >= 0x21)
1069                 hclge_convert_setting_fec(mac);
1070
1071         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1072         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1073         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1074 }
1075
1076 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1077                                             u8 speed_ability)
1078 {
1079         struct hclge_mac *mac = &hdev->hw.mac;
1080
1081         hclge_convert_setting_kr(mac, speed_ability);
1082         if (hdev->pdev->revision >= 0x21)
1083                 hclge_convert_setting_fec(mac);
1084         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1085         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1086         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1087 }
1088
1089 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1090                                          u8 speed_ability)
1091 {
1092         unsigned long *supported = hdev->hw.mac.supported;
1093
1094         /* default to supporting all speeds for a GE port */
1095         if (!speed_ability)
1096                 speed_ability = HCLGE_SUPPORT_GE;
1097
1098         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1099                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1100                                  supported);
1101
1102         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1104                                  supported);
1105                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1106                                  supported);
1107         }
1108
1109         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1110                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1111                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1112         }
1113
1114         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1115         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1116         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1117         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1118 }
1119
1120 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1121 {
1122         u8 media_type = hdev->hw.mac.media_type;
1123
1124         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1125                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1126         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1127                 hclge_parse_copper_link_mode(hdev, speed_ability);
1128         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1129                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1130 }
1131
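/* Unpack the GET_CFG_PARAM response descriptors into struct hclge_cfg:
 * vmdq/tc/queue numbers, PHY address, media type, RX buffer length, MAC
 * address, default speed, RSS size, NUMA map, speed ability and UMV space.
 */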
1132 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1133 {
1134         struct hclge_cfg_param_cmd *req;
1135         u64 mac_addr_tmp_high;
1136         u64 mac_addr_tmp;
1137         unsigned int i;
1138
1139         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1140
1141         /* get the configuration */
1142         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1143                                               HCLGE_CFG_VMDQ_M,
1144                                               HCLGE_CFG_VMDQ_S);
1145         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1146                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1147         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1148                                             HCLGE_CFG_TQP_DESC_N_M,
1149                                             HCLGE_CFG_TQP_DESC_N_S);
1150
1151         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1152                                         HCLGE_CFG_PHY_ADDR_M,
1153                                         HCLGE_CFG_PHY_ADDR_S);
1154         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1155                                           HCLGE_CFG_MEDIA_TP_M,
1156                                           HCLGE_CFG_MEDIA_TP_S);
1157         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1158                                           HCLGE_CFG_RX_BUF_LEN_M,
1159                                           HCLGE_CFG_RX_BUF_LEN_S);
1160         /* get mac_address */
1161         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1162         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1163                                             HCLGE_CFG_MAC_ADDR_H_M,
1164                                             HCLGE_CFG_MAC_ADDR_H_S);
1165
1166         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1167
1168         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1169                                              HCLGE_CFG_DEFAULT_SPEED_M,
1170                                              HCLGE_CFG_DEFAULT_SPEED_S);
1171         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1172                                             HCLGE_CFG_RSS_SIZE_M,
1173                                             HCLGE_CFG_RSS_SIZE_S);
1174
1175         for (i = 0; i < ETH_ALEN; i++)
1176                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1177
1178         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1179         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1180
1181         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1182                                              HCLGE_CFG_SPEED_ABILITY_M,
1183                                              HCLGE_CFG_SPEED_ABILITY_S);
1184         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1185                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1186                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1187         if (!cfg->umv_space)
1188                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1189 }
1190
1191 /* hclge_get_cfg: query the static parameters from flash
1192  * @hdev: pointer to struct hclge_dev
1193  * @hcfg: the config structure to be filled in
1194  */
1195 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1196 {
1197         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1198         struct hclge_cfg_param_cmd *req;
1199         unsigned int i;
1200         int ret;
1201
1202         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1203                 u32 offset = 0;
1204
1205                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1206                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1207                                            true);
1208                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1209                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1210                 /* Len should be in units of 4 bytes when sent to hardware */
1211                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1212                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1213                 req->offset = cpu_to_le32(offset);
1214         }
1215
1216         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1217         if (ret) {
1218                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1219                 return ret;
1220         }
1221
1222         hclge_parse_cfg(hcfg, desc);
1223
1224         return 0;
1225 }
1226
1227 static int hclge_get_cap(struct hclge_dev *hdev)
1228 {
1229         int ret;
1230
1231         ret = hclge_query_function_status(hdev);
1232         if (ret) {
1233                 dev_err(&hdev->pdev->dev,
1234                         "query function status error %d.\n", ret);
1235                 return ret;
1236         }
1237
1238         /* get pf resource */
1239         ret = hclge_query_pf_resource(hdev);
1240         if (ret)
1241                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1242
1243         return ret;
1244 }
1245
1246 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1247 {
1248 #define HCLGE_MIN_TX_DESC       64
1249 #define HCLGE_MIN_RX_DESC       64
1250
1251         if (!is_kdump_kernel())
1252                 return;
1253
1254         dev_info(&hdev->pdev->dev,
1255                  "Running kdump kernel. Using minimal resources\n");
1256
1257         /* minimum number of queue pairs equals the number of vports */
1258         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1259         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1260         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1261 }
1262
1263 static int hclge_configure(struct hclge_dev *hdev)
1264 {
1265         struct hclge_cfg cfg;
1266         unsigned int i;
1267         int ret;
1268
1269         ret = hclge_get_cfg(hdev, &cfg);
1270         if (ret) {
1271                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1272                 return ret;
1273         }
1274
1275         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1276         hdev->base_tqp_pid = 0;
1277         hdev->rss_size_max = cfg.rss_size_max;
1278         hdev->rx_buf_len = cfg.rx_buf_len;
1279         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1280         hdev->hw.mac.media_type = cfg.media_type;
1281         hdev->hw.mac.phy_addr = cfg.phy_addr;
1282         hdev->num_tx_desc = cfg.tqp_desc_num;
1283         hdev->num_rx_desc = cfg.tqp_desc_num;
1284         hdev->tm_info.num_pg = 1;
1285         hdev->tc_max = cfg.tc_num;
1286         hdev->tm_info.hw_pfc_map = 0;
1287         hdev->wanted_umv_size = cfg.umv_space;
1288
1289         if (hnae3_dev_fd_supported(hdev)) {
1290                 hdev->fd_en = true;
1291                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1292         }
1293
1294         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1295         if (ret) {
1296                 dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
1297                 return ret;
1298         }
1299
1300         hclge_parse_link_mode(hdev, cfg.speed_ability);
1301
1302         if (hdev->tc_max > HNAE3_MAX_TC ||
1303             hdev->tc_max < 1) {
1304                 dev_warn(&hdev->pdev->dev,
1305                          "invalid TC num %d, set to 1.\n", hdev->tc_max);
1306                 hdev->tc_max = 1;
1307         }
1308
1309         /* Dev does not support DCB */
1310         if (!hnae3_dev_dcb_supported(hdev)) {
1311                 hdev->tc_max = 1;
1312                 hdev->pfc_max = 0;
1313         } else {
1314                 hdev->pfc_max = hdev->tc_max;
1315         }
1316
1317         hdev->tm_info.num_tc = 1;
1318
1319         /* Non-contiguous TCs are currently not supported */
1320         for (i = 0; i < hdev->tm_info.num_tc; i++)
1321                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1322
1323         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1324
1325         hclge_init_kdump_kernel_config(hdev);
1326
1327         /* Set the initial affinity based on the PCI function number */
1328         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1329         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1330         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1331                         &hdev->affinity_mask);
1332
1333         return ret;
1334 }
1335
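/* The initial affinity chosen at the end of hclge_configure() is simply the
 * PF's PCI function number taken modulo the CPU count of the device's NUMA
 * node, mapped back to a node-local CPU.  A small sketch of that index
 * computation, with a hypothetical helper name, for illustration only:
 */
#if 0	/* illustrative sketch, not driver code */
static unsigned int sketch_affinity_index(unsigned int node_cpus,
					  unsigned int devfn)
{
	/* guard against a node that reports no CPUs, as the driver does */
	return node_cpus ? PCI_FUNC(devfn) % node_cpus : 0;
}
#endif
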
1336 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1337                             unsigned int tso_mss_max)
1338 {
1339         struct hclge_cfg_tso_status_cmd *req;
1340         struct hclge_desc desc;
1341         u16 tso_mss;
1342
1343         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1344
1345         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1346
1347         tso_mss = 0;
1348         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1349                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1350         req->tso_mss_min = cpu_to_le16(tso_mss);
1351
1352         tso_mss = 0;
1353         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1354                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1355         req->tso_mss_max = cpu_to_le16(tso_mss);
1356
1357         return hclge_cmd_send(&hdev->hw, &desc, 1);
1358 }
1359
1360 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1361 {
1362         struct hclge_cfg_gro_status_cmd *req;
1363         struct hclge_desc desc;
1364         int ret;
1365
1366         if (!hnae3_dev_gro_supported(hdev))
1367                 return 0;
1368
1369         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1370         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1371
1372         req->gro_en = cpu_to_le16(en ? 1 : 0);
1373
1374         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1375         if (ret)
1376                 dev_err(&hdev->pdev->dev,
1377                         "GRO hardware config cmd failed, ret = %d\n", ret);
1378
1379         return ret;
1380 }
1381
1382 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1383 {
1384         struct hclge_tqp *tqp;
1385         int i;
1386
1387         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1388                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1389         if (!hdev->htqp)
1390                 return -ENOMEM;
1391
1392         tqp = hdev->htqp;
1393
1394         for (i = 0; i < hdev->num_tqps; i++) {
1395                 tqp->dev = &hdev->pdev->dev;
1396                 tqp->index = i;
1397
1398                 tqp->q.ae_algo = &ae_algo;
1399                 tqp->q.buf_size = hdev->rx_buf_len;
1400                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1401                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1402                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1403                         i * HCLGE_TQP_REG_SIZE;
1404
1405                 tqp++;
1406         }
1407
1408         return 0;
1409 }
1410
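/* Each TQP in hclge_alloc_tqps() gets its own register window at a fixed
 * stride from the PF's io_base, i.e.
 * io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE.  As a purely
 * hypothetical worked example (an offset of 0x1000 and a stride of 0x200
 * are made-up values, not the real macro values), queue 3 would then be
 * mapped at io_base + 0x1000 + 3 * 0x200 = io_base + 0x1600.
 */
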
1411 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1412                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1413 {
1414         struct hclge_tqp_map_cmd *req;
1415         struct hclge_desc desc;
1416         int ret;
1417
1418         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1419
1420         req = (struct hclge_tqp_map_cmd *)desc.data;
1421         req->tqp_id = cpu_to_le16(tqp_pid);
1422         req->tqp_vf = func_id;
1423         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1424         if (!is_pf)
1425                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1426         req->tqp_vid = cpu_to_le16(tqp_vid);
1427
1428         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1429         if (ret)
1430                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1431
1432         return ret;
1433 }
1434
1435 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1436 {
1437         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1438         struct hclge_dev *hdev = vport->back;
1439         int i, alloced;
1440
1441         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1442              alloced < num_tqps; i++) {
1443                 if (!hdev->htqp[i].alloced) {
1444                         hdev->htqp[i].q.handle = &vport->nic;
1445                         hdev->htqp[i].q.tqp_index = alloced;
1446                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1447                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1448                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1449                         hdev->htqp[i].alloced = true;
1450                         alloced++;
1451                 }
1452         }
1453         vport->alloc_tqps = alloced;
1454         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1455                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1456
1457         return 0;
1458 }
1459
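/* The per-vport RSS size computed in hclge_assign_tqp() is just
 * min(rss_size_max, alloc_tqps / num_tc).  Hypothetical example with
 * made-up numbers: 16 allocated TQPs spread over 4 TCs leaves 4 queues per
 * TC, so even with rss_size_max = 64 the vport ends up with rss_size = 4.
 * A minimal sketch of the same computation:
 */
#if 0	/* illustrative sketch, not driver code */
static u16 sketch_rss_size(u16 rss_size_max, u16 alloc_tqps, u8 num_tc)
{
	/* num_tc is always at least 1 in the driver, so no divide-by-zero */
	return min_t(u16, rss_size_max, alloc_tqps / num_tc);
}
#endif
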
1460 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1461                             u16 num_tx_desc, u16 num_rx_desc)
1462
1463 {
1464         struct hnae3_handle *nic = &vport->nic;
1465         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1466         struct hclge_dev *hdev = vport->back;
1467         int ret;
1468
1469         kinfo->num_tx_desc = num_tx_desc;
1470         kinfo->num_rx_desc = num_rx_desc;
1471
1472         kinfo->rx_buf_len = hdev->rx_buf_len;
1473
1474         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1475                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1476         if (!kinfo->tqp)
1477                 return -ENOMEM;
1478
1479         ret = hclge_assign_tqp(vport, num_tqps);
1480         if (ret)
1481                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1482
1483         return ret;
1484 }
1485
1486 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1487                                   struct hclge_vport *vport)
1488 {
1489         struct hnae3_handle *nic = &vport->nic;
1490         struct hnae3_knic_private_info *kinfo;
1491         u16 i;
1492
1493         kinfo = &nic->kinfo;
1494         for (i = 0; i < vport->alloc_tqps; i++) {
1495                 struct hclge_tqp *q =
1496                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1497                 bool is_pf;
1498                 int ret;
1499
1500                 is_pf = !(vport->vport_id);
1501                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1502                                              i, is_pf);
1503                 if (ret)
1504                         return ret;
1505         }
1506
1507         return 0;
1508 }
1509
1510 static int hclge_map_tqp(struct hclge_dev *hdev)
1511 {
1512         struct hclge_vport *vport = hdev->vport;
1513         u16 i, num_vport;
1514
1515         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1516         for (i = 0; i < num_vport; i++) {
1517                 int ret;
1518
1519                 ret = hclge_map_tqp_to_vport(hdev, vport);
1520                 if (ret)
1521                         return ret;
1522
1523                 vport++;
1524         }
1525
1526         return 0;
1527 }
1528
1529 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1530 {
1531         struct hnae3_handle *nic = &vport->nic;
1532         struct hclge_dev *hdev = vport->back;
1533         int ret;
1534
1535         nic->pdev = hdev->pdev;
1536         nic->ae_algo = &ae_algo;
1537         nic->numa_node_mask = hdev->numa_node_mask;
1538
1539         ret = hclge_knic_setup(vport, num_tqps,
1540                                hdev->num_tx_desc, hdev->num_rx_desc);
1541         if (ret)
1542                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1543
1544         return ret;
1545 }
1546
1547 static int hclge_alloc_vport(struct hclge_dev *hdev)
1548 {
1549         struct pci_dev *pdev = hdev->pdev;
1550         struct hclge_vport *vport;
1551         u32 tqp_main_vport;
1552         u32 tqp_per_vport;
1553         int num_vport, i;
1554         int ret;
1555
1556         /* We need to alloc a vport for main NIC of PF */
1557         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1558
1559         if (hdev->num_tqps < num_vport) {
1560                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1561                         hdev->num_tqps, num_vport);
1562                 return -EINVAL;
1563         }
1564
1565         /* Alloc the same number of TQPs for every vport */
1566         tqp_per_vport = hdev->num_tqps / num_vport;
1567         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1568
1569         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1570                              GFP_KERNEL);
1571         if (!vport)
1572                 return -ENOMEM;
1573
1574         hdev->vport = vport;
1575         hdev->num_alloc_vport = num_vport;
1576
1577         if (IS_ENABLED(CONFIG_PCI_IOV))
1578                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1579
1580         for (i = 0; i < num_vport; i++) {
1581                 vport->back = hdev;
1582                 vport->vport_id = i;
1583                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1584                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1585                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1586                 INIT_LIST_HEAD(&vport->vlan_list);
1587                 INIT_LIST_HEAD(&vport->uc_mac_list);
1588                 INIT_LIST_HEAD(&vport->mc_mac_list);
1589
1590                 if (i == 0)
1591                         ret = hclge_vport_setup(vport, tqp_main_vport);
1592                 else
1593                         ret = hclge_vport_setup(vport, tqp_per_vport);
1594                 if (ret) {
1595                         dev_err(&pdev->dev,
1596                                 "vport setup failed for vport %d, %d\n",
1597                                 i, ret);
1598                         return ret;
1599                 }
1600
1601                 vport++;
1602         }
1603
1604         return 0;
1605 }
1606
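/* hclge_alloc_vport() splits the TQPs evenly across all vports (PF + VMDq +
 * requested VFs), with the remainder going to vport 0, the PF's main NIC.
 * Hypothetical example with made-up numbers: 65 TQPs over 8 vports gives
 * tqp_per_vport = 65 / 8 = 8 and tqp_main_vport = 8 + 65 % 8 = 9, so the
 * PF vport gets 9 queues and each of the other seven vports gets 8.
 */
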
1607 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1608                                     struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610 /* TX buffer size is in units of 128 bytes */
1611 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1612 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1613         struct hclge_tx_buff_alloc_cmd *req;
1614         struct hclge_desc desc;
1615         int ret;
1616         u8 i;
1617
1618         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1619
1620         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1621         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1622                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1623
1624                 req->tx_pkt_buff[i] =
1625                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1626                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1627         }
1628
1629         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1630         if (ret)
1631                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1632                         ret);
1633
1634         return ret;
1635 }
1636
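/* Per-TC TX buffer sizes are handed to hardware in 128-byte units with the
 * update-enable bit set, as done in hclge_cmd_alloc_tx_buff().  Hypothetical
 * example with a made-up size: a 32 KiB buffer (0x8000 bytes) encodes as
 * (0x8000 >> HCLGE_BUF_SIZE_UNIT_SHIFT) | HCLGE_BUF_SIZE_UPDATE_EN_MSK
 * = 0x100 | 0x8000 = 0x8100.  A minimal sketch of that encoding:
 */
#if 0	/* illustrative sketch, not driver code */
static __le16 sketch_encode_tx_buf(u32 buf_size)
{
	return cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
			   HCLGE_BUF_SIZE_UPDATE_EN_MSK);
}
#endif
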
1637 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1638                                  struct hclge_pkt_buf_alloc *buf_alloc)
1639 {
1640         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1641
1642         if (ret)
1643                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1644
1645         return ret;
1646 }
1647
1648 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1649 {
1650         unsigned int i;
1651         u32 cnt = 0;
1652
1653         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1654                 if (hdev->hw_tc_map & BIT(i))
1655                         cnt++;
1656         return cnt;
1657 }
1658
1659 /* Get the number of PFC-enabled TCs that have a private buffer */
1660 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1661                                   struct hclge_pkt_buf_alloc *buf_alloc)
1662 {
1663         struct hclge_priv_buf *priv;
1664         unsigned int i;
1665         int cnt = 0;
1666
1667         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1668                 priv = &buf_alloc->priv_buf[i];
1669                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1670                     priv->enable)
1671                         cnt++;
1672         }
1673
1674         return cnt;
1675 }
1676
1677 /* Get the number of PFC-disabled TCs that have a private buffer */
1678 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1679                                      struct hclge_pkt_buf_alloc *buf_alloc)
1680 {
1681         struct hclge_priv_buf *priv;
1682         unsigned int i;
1683         int cnt = 0;
1684
1685         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1686                 priv = &buf_alloc->priv_buf[i];
1687                 if (hdev->hw_tc_map & BIT(i) &&
1688                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1689                     priv->enable)
1690                         cnt++;
1691         }
1692
1693         return cnt;
1694 }
1695
1696 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1697 {
1698         struct hclge_priv_buf *priv;
1699         u32 rx_priv = 0;
1700         int i;
1701
1702         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1703                 priv = &buf_alloc->priv_buf[i];
1704                 if (priv->enable)
1705                         rx_priv += priv->buf_size;
1706         }
1707         return rx_priv;
1708 }
1709
1710 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1711 {
1712         u32 i, total_tx_size = 0;
1713
1714         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1715                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1716
1717         return total_tx_size;
1718 }
1719
1720 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1721                                 struct hclge_pkt_buf_alloc *buf_alloc,
1722                                 u32 rx_all)
1723 {
1724         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1725         u32 tc_num = hclge_get_tc_num(hdev);
1726         u32 shared_buf, aligned_mps;
1727         u32 rx_priv;
1728         int i;
1729
1730         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1731
1732         if (hnae3_dev_dcb_supported(hdev))
1733                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1734                                         hdev->dv_buf_size;
1735         else
1736                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1737                                         + hdev->dv_buf_size;
1738
1739         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1740         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1741                              HCLGE_BUF_SIZE_UNIT);
1742
1743         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1744         if (rx_all < rx_priv + shared_std)
1745                 return false;
1746
1747         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1748         buf_alloc->s_buf.buf_size = shared_buf;
1749         if (hnae3_dev_dcb_supported(hdev)) {
1750                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1751                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1752                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1753                                   HCLGE_BUF_SIZE_UNIT);
1754         } else {
1755                 buf_alloc->s_buf.self.high = aligned_mps +
1756                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1757                 buf_alloc->s_buf.self.low = aligned_mps;
1758         }
1759
1760         if (hnae3_dev_dcb_supported(hdev)) {
1761                 hi_thrd = shared_buf - hdev->dv_buf_size;
1762
1763                 if (tc_num <= NEED_RESERVE_TC_NUM)
1764                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1765                                         / BUF_MAX_PERCENT;
1766
1767                 if (tc_num)
1768                         hi_thrd = hi_thrd / tc_num;
1769
1770                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1771                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1772                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1773         } else {
1774                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1775                 lo_thrd = aligned_mps;
1776         }
1777
1778         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1779                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1780                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1781         }
1782
1783         return true;
1784 }
1785
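/* On DCB-capable hardware hclge_is_rx_buf_ok() requires the shared RX buffer
 * to cover at least
 * max(HCLGE_BUF_MUL_BY * aligned_mps + dv_buf_size,
 *     (tc_num + 1) * aligned_mps), rounded up to HCLGE_BUF_SIZE_UNIT.
 * Hypothetical worked example (mps and dv_buf_size are made-up values): an
 * mps of 1518 rounds up to aligned_mps = 1536; with dv_buf_size = 8192 and
 * tc_num = 4 this gives max(2 * 1536 + 8192, 5 * 1536) =
 * max(11264, 7680) = 11264, already a multiple of 256, so shared_std = 11264.
 */
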
1786 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1787                                 struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 i, total_size;
1790
1791         total_size = hdev->pkt_buf_size;
1792
1793         /* alloc tx buffer for all enabled tc */
1794         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i)) {
1798                         if (total_size < hdev->tx_buf_size)
1799                                 return -ENOMEM;
1800
1801                         priv->tx_buf_size = hdev->tx_buf_size;
1802                 } else {
1803                         priv->tx_buf_size = 0;
1804                 }
1805
1806                 total_size -= priv->tx_buf_size;
1807         }
1808
1809         return 0;
1810 }
1811
1812 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1813                                   struct hclge_pkt_buf_alloc *buf_alloc)
1814 {
1815         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1816         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1817         unsigned int i;
1818
1819         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1820                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1821
1822                 priv->enable = 0;
1823                 priv->wl.low = 0;
1824                 priv->wl.high = 0;
1825                 priv->buf_size = 0;
1826
1827                 if (!(hdev->hw_tc_map & BIT(i)))
1828                         continue;
1829
1830                 priv->enable = 1;
1831
1832                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1833                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1834                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1835                                                 HCLGE_BUF_SIZE_UNIT);
1836                 } else {
1837                         priv->wl.low = 0;
1838                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1839                                         aligned_mps;
1840                 }
1841
1842                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1843         }
1844
1845         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1846 }
1847
1848 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1849                                           struct hclge_pkt_buf_alloc *buf_alloc)
1850 {
1851         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1852         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1853         int i;
1854
1855         /* clear TCs starting from the last one */
1856         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1857                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1858                 unsigned int mask = BIT((unsigned int)i);
1859
1860                 if (hdev->hw_tc_map & mask &&
1861                     !(hdev->tm_info.hw_pfc_map & mask)) {
1862                         /* Clear the private buffer of this non-PFC TC */
1863                         priv->wl.low = 0;
1864                         priv->wl.high = 0;
1865                         priv->buf_size = 0;
1866                         priv->enable = 0;
1867                         no_pfc_priv_num--;
1868                 }
1869
1870                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1871                     no_pfc_priv_num == 0)
1872                         break;
1873         }
1874
1875         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1876 }
1877
1878 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1879                                         struct hclge_pkt_buf_alloc *buf_alloc)
1880 {
1881         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1882         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1883         int i;
1884
1885         /* clear TCs starting from the last one */
1886         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1887                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1888                 unsigned int mask = BIT((unsigned int)i);
1889
1890                 if (hdev->hw_tc_map & mask &&
1891                     hdev->tm_info.hw_pfc_map & mask) {
1892                         /* Reduce the number of PFC TCs with a private buffer */
1893                         priv->wl.low = 0;
1894                         priv->enable = 0;
1895                         priv->wl.high = 0;
1896                         priv->buf_size = 0;
1897                         pfc_priv_num--;
1898                 }
1899
1900                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1901                     pfc_priv_num == 0)
1902                         break;
1903         }
1904
1905         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1906 }
1907
1908 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1909                                        struct hclge_pkt_buf_alloc *buf_alloc)
1910 {
1911 #define COMPENSATE_BUFFER       0x3C00
1912 #define COMPENSATE_HALF_MPS_NUM 5
1913 #define PRIV_WL_GAP             0x1800
1914
1915         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1916         u32 tc_num = hclge_get_tc_num(hdev);
1917         u32 half_mps = hdev->mps >> 1;
1918         u32 min_rx_priv;
1919         unsigned int i;
1920
1921         if (tc_num)
1922                 rx_priv = rx_priv / tc_num;
1923
1924         if (tc_num <= NEED_RESERVE_TC_NUM)
1925                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1926
1927         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1928                         COMPENSATE_HALF_MPS_NUM * half_mps;
1929         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1930         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1931
1932         if (rx_priv < min_rx_priv)
1933                 return false;
1934
1935         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1936                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1937
1938                 priv->enable = 0;
1939                 priv->wl.low = 0;
1940                 priv->wl.high = 0;
1941                 priv->buf_size = 0;
1942
1943                 if (!(hdev->hw_tc_map & BIT(i)))
1944                         continue;
1945
1946                 priv->enable = 1;
1947                 priv->buf_size = rx_priv;
1948                 priv->wl.high = rx_priv - hdev->dv_buf_size;
1949                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1950         }
1951
1952         buf_alloc->s_buf.buf_size = 0;
1953
1954         return true;
1955 }
1956
1957 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1958  * @hdev: pointer to struct hclge_dev
1959  * @buf_alloc: pointer to buffer calculation data
1960  * @return: 0: calculation successful, negative: failure
1961  */
1962 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1963                                 struct hclge_pkt_buf_alloc *buf_alloc)
1964 {
1965         /* When DCB is not supported, rx private buffer is not allocated. */
1966         if (!hnae3_dev_dcb_supported(hdev)) {
1967                 u32 rx_all = hdev->pkt_buf_size;
1968
1969                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1970                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1971                         return -ENOMEM;
1972
1973                 return 0;
1974         }
1975
1976         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
1977                 return 0;
1978
1979         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1980                 return 0;
1981
1982         /* try to decrease the buffer size */
1983         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1984                 return 0;
1985
1986         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1987                 return 0;
1988
1989         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1990                 return 0;
1991
1992         return -ENOMEM;
1993 }
1994
1995 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1996                                    struct hclge_pkt_buf_alloc *buf_alloc)
1997 {
1998         struct hclge_rx_priv_buff_cmd *req;
1999         struct hclge_desc desc;
2000         int ret;
2001         int i;
2002
2003         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2004         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2005
2006         /* Alloc private buffer TCs */
2007         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2008                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2009
2010                 req->buf_num[i] =
2011                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2012                 req->buf_num[i] |=
2013                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2014         }
2015
2016         req->shared_buf =
2017                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2018                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2019
2020         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2021         if (ret)
2022                 dev_err(&hdev->pdev->dev,
2023                         "rx private buffer alloc cmd failed %d\n", ret);
2024
2025         return ret;
2026 }
2027
2028 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2029                                    struct hclge_pkt_buf_alloc *buf_alloc)
2030 {
2031         struct hclge_rx_priv_wl_buf *req;
2032         struct hclge_priv_buf *priv;
2033         struct hclge_desc desc[2];
2034         int i, j;
2035         int ret;
2036
2037         for (i = 0; i < 2; i++) {
2038                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2039                                            false);
2040                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2041
2042                 /* The first descriptor sets the NEXT bit to 1 */
2043                 if (i == 0)
2044                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2045                 else
2046                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2047
2048                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2049                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2050
2051                         priv = &buf_alloc->priv_buf[idx];
2052                         req->tc_wl[j].high =
2053                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2054                         req->tc_wl[j].high |=
2055                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2056                         req->tc_wl[j].low =
2057                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2058                         req->tc_wl[j].low |=
2059                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2060                 }
2061         }
2062
2063         /* Send 2 descriptors at one time */
2064         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2065         if (ret)
2066                 dev_err(&hdev->pdev->dev,
2067                         "rx private waterline config cmd failed %d\n",
2068                         ret);
2069         return ret;
2070 }
2071
2072 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2073                                     struct hclge_pkt_buf_alloc *buf_alloc)
2074 {
2075         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2076         struct hclge_rx_com_thrd *req;
2077         struct hclge_desc desc[2];
2078         struct hclge_tc_thrd *tc;
2079         int i, j;
2080         int ret;
2081
2082         for (i = 0; i < 2; i++) {
2083                 hclge_cmd_setup_basic_desc(&desc[i],
2084                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2085                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2086
2087                 /* The first descriptor sets the NEXT bit to 1 */
2088                 if (i == 0)
2089                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2090                 else
2091                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2092
2093                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2094                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2095
2096                         req->com_thrd[j].high =
2097                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2098                         req->com_thrd[j].high |=
2099                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2100                         req->com_thrd[j].low =
2101                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2102                         req->com_thrd[j].low |=
2103                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2104                 }
2105         }
2106
2107         /* Send 2 descriptors at one time */
2108         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2109         if (ret)
2110                 dev_err(&hdev->pdev->dev,
2111                         "common threshold config cmd failed %d\n", ret);
2112         return ret;
2113 }
2114
2115 static int hclge_common_wl_config(struct hclge_dev *hdev,
2116                                   struct hclge_pkt_buf_alloc *buf_alloc)
2117 {
2118         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2119         struct hclge_rx_com_wl *req;
2120         struct hclge_desc desc;
2121         int ret;
2122
2123         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2124
2125         req = (struct hclge_rx_com_wl *)desc.data;
2126         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2127         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2128
2129         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2130         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2131
2132         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2133         if (ret)
2134                 dev_err(&hdev->pdev->dev,
2135                         "common waterline config cmd failed %d\n", ret);
2136
2137         return ret;
2138 }
2139
2140 int hclge_buffer_alloc(struct hclge_dev *hdev)
2141 {
2142         struct hclge_pkt_buf_alloc *pkt_buf;
2143         int ret;
2144
2145         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2146         if (!pkt_buf)
2147                 return -ENOMEM;
2148
2149         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2150         if (ret) {
2151                 dev_err(&hdev->pdev->dev,
2152                         "could not calc tx buffer size for all TCs %d\n", ret);
2153                 goto out;
2154         }
2155
2156         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2157         if (ret) {
2158                 dev_err(&hdev->pdev->dev,
2159                         "could not alloc tx buffers %d\n", ret);
2160                 goto out;
2161         }
2162
2163         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2164         if (ret) {
2165                 dev_err(&hdev->pdev->dev,
2166                         "could not calc rx priv buffer size for all TCs %d\n",
2167                         ret);
2168                 goto out;
2169         }
2170
2171         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2172         if (ret) {
2173                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2174                         ret);
2175                 goto out;
2176         }
2177
2178         if (hnae3_dev_dcb_supported(hdev)) {
2179                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2180                 if (ret) {
2181                         dev_err(&hdev->pdev->dev,
2182                                 "could not configure rx private waterline %d\n",
2183                                 ret);
2184                         goto out;
2185                 }
2186
2187                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2188                 if (ret) {
2189                         dev_err(&hdev->pdev->dev,
2190                                 "could not configure common threshold %d\n",
2191                                 ret);
2192                         goto out;
2193                 }
2194         }
2195
2196         ret = hclge_common_wl_config(hdev, pkt_buf);
2197         if (ret)
2198                 dev_err(&hdev->pdev->dev,
2199                         "could not configure common waterline %d\n", ret);
2200
2201 out:
2202         kfree(pkt_buf);
2203         return ret;
2204 }
2205
2206 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2207 {
2208         struct hnae3_handle *roce = &vport->roce;
2209         struct hnae3_handle *nic = &vport->nic;
2210
2211         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2212
2213         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2214             vport->back->num_msi_left == 0)
2215                 return -EINVAL;
2216
2217         roce->rinfo.base_vector = vport->back->roce_base_vector;
2218
2219         roce->rinfo.netdev = nic->kinfo.netdev;
2220         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2221
2222         roce->pdev = nic->pdev;
2223         roce->ae_algo = nic->ae_algo;
2224         roce->numa_node_mask = nic->numa_node_mask;
2225
2226         return 0;
2227 }
2228
2229 static int hclge_init_msi(struct hclge_dev *hdev)
2230 {
2231         struct pci_dev *pdev = hdev->pdev;
2232         int vectors;
2233         int i;
2234
2235         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2236                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2237         if (vectors < 0) {
2238                 dev_err(&pdev->dev,
2239                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2240                         vectors);
2241                 return vectors;
2242         }
2243         if (vectors < hdev->num_msi)
2244                 dev_warn(&hdev->pdev->dev,
2245                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2246                          hdev->num_msi, vectors);
2247
2248         hdev->num_msi = vectors;
2249         hdev->num_msi_left = vectors;
2250         hdev->base_msi_vector = pdev->irq;
2251         hdev->roce_base_vector = hdev->base_msi_vector +
2252                                 hdev->roce_base_msix_offset;
2253
2254         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2255                                            sizeof(u16), GFP_KERNEL);
2256         if (!hdev->vector_status) {
2257                 pci_free_irq_vectors(pdev);
2258                 return -ENOMEM;
2259         }
2260
2261         for (i = 0; i < hdev->num_msi; i++)
2262                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2263
2264         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2265                                         sizeof(int), GFP_KERNEL);
2266         if (!hdev->vector_irq) {
2267                 pci_free_irq_vectors(pdev);
2268                 return -ENOMEM;
2269         }
2270
2271         return 0;
2272 }
2273
2274 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2275 {
2276         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2277                 duplex = HCLGE_MAC_FULL;
2278
2279         return duplex;
2280 }
2281
2282 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2283                                       u8 duplex)
2284 {
2285         struct hclge_config_mac_speed_dup_cmd *req;
2286         struct hclge_desc desc;
2287         int ret;
2288
2289         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2290
2291         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2292
2293         if (duplex)
2294                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2295
2296         switch (speed) {
2297         case HCLGE_MAC_SPEED_10M:
2298                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2299                                 HCLGE_CFG_SPEED_S, 6);
2300                 break;
2301         case HCLGE_MAC_SPEED_100M:
2302                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2303                                 HCLGE_CFG_SPEED_S, 7);
2304                 break;
2305         case HCLGE_MAC_SPEED_1G:
2306                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2307                                 HCLGE_CFG_SPEED_S, 0);
2308                 break;
2309         case HCLGE_MAC_SPEED_10G:
2310                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2311                                 HCLGE_CFG_SPEED_S, 1);
2312                 break;
2313         case HCLGE_MAC_SPEED_25G:
2314                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2315                                 HCLGE_CFG_SPEED_S, 2);
2316                 break;
2317         case HCLGE_MAC_SPEED_40G:
2318                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2319                                 HCLGE_CFG_SPEED_S, 3);
2320                 break;
2321         case HCLGE_MAC_SPEED_50G:
2322                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2323                                 HCLGE_CFG_SPEED_S, 4);
2324                 break;
2325         case HCLGE_MAC_SPEED_100G:
2326                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2327                                 HCLGE_CFG_SPEED_S, 5);
2328                 break;
2329         default:
2330                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2331                 return -EINVAL;
2332         }
2333
2334         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2335                       1);
2336
2337         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2338         if (ret) {
2339                 dev_err(&hdev->pdev->dev,
2340                         "mac speed/duplex config cmd failed %d.\n", ret);
2341                 return ret;
2342         }
2343
2344         return 0;
2345 }
2346
2347 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2348 {
2349         int ret;
2350
2351         duplex = hclge_check_speed_dup(duplex, speed);
2352         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2353                 return 0;
2354
2355         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2356         if (ret)
2357                 return ret;
2358
2359         hdev->hw.mac.speed = speed;
2360         hdev->hw.mac.duplex = duplex;
2361
2362         return 0;
2363 }
2364
2365 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2366                                      u8 duplex)
2367 {
2368         struct hclge_vport *vport = hclge_get_vport(handle);
2369         struct hclge_dev *hdev = vport->back;
2370
2371         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2372 }
2373
2374 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2375 {
2376         struct hclge_config_auto_neg_cmd *req;
2377         struct hclge_desc desc;
2378         u32 flag = 0;
2379         int ret;
2380
2381         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2382
2383         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2384         if (enable)
2385                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2386         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2387
2388         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2389         if (ret)
2390                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2391                         ret);
2392
2393         return ret;
2394 }
2395
2396 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2397 {
2398         struct hclge_vport *vport = hclge_get_vport(handle);
2399         struct hclge_dev *hdev = vport->back;
2400
2401         if (!hdev->hw.mac.support_autoneg) {
2402                 if (enable) {
2403                         dev_err(&hdev->pdev->dev,
2404                                 "autoneg is not supported by current port\n");
2405                         return -EOPNOTSUPP;
2406                 } else {
2407                         return 0;
2408                 }
2409         }
2410
2411         return hclge_set_autoneg_en(hdev, enable);
2412 }
2413
2414 static int hclge_get_autoneg(struct hnae3_handle *handle)
2415 {
2416         struct hclge_vport *vport = hclge_get_vport(handle);
2417         struct hclge_dev *hdev = vport->back;
2418         struct phy_device *phydev = hdev->hw.mac.phydev;
2419
2420         if (phydev)
2421                 return phydev->autoneg;
2422
2423         return hdev->hw.mac.autoneg;
2424 }
2425
2426 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2427 {
2428         struct hclge_vport *vport = hclge_get_vport(handle);
2429         struct hclge_dev *hdev = vport->back;
2430         int ret;
2431
2432         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2433
2434         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2435         if (ret)
2436                 return ret;
2437         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2438 }
2439
2440 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2441 {
2442         struct hclge_vport *vport = hclge_get_vport(handle);
2443         struct hclge_dev *hdev = vport->back;
2444
2445         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2446                 return hclge_set_autoneg_en(hdev, !halt);
2447
2448         return 0;
2449 }
2450
2451 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2452 {
2453         struct hclge_config_fec_cmd *req;
2454         struct hclge_desc desc;
2455         int ret;
2456
2457         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2458
2459         req = (struct hclge_config_fec_cmd *)desc.data;
2460         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2461                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2462         if (fec_mode & BIT(HNAE3_FEC_RS))
2463                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2464                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2465         if (fec_mode & BIT(HNAE3_FEC_BASER))
2466                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2467                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2468
2469         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2470         if (ret)
2471                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2472
2473         return ret;
2474 }
2475
2476 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2477 {
2478         struct hclge_vport *vport = hclge_get_vport(handle);
2479         struct hclge_dev *hdev = vport->back;
2480         struct hclge_mac *mac = &hdev->hw.mac;
2481         int ret;
2482
2483         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2484                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2485                 return -EINVAL;
2486         }
2487
2488         ret = hclge_set_fec_hw(hdev, fec_mode);
2489         if (ret)
2490                 return ret;
2491
2492         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2493         return 0;
2494 }
2495
2496 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2497                           u8 *fec_mode)
2498 {
2499         struct hclge_vport *vport = hclge_get_vport(handle);
2500         struct hclge_dev *hdev = vport->back;
2501         struct hclge_mac *mac = &hdev->hw.mac;
2502
2503         if (fec_ability)
2504                 *fec_ability = mac->fec_ability;
2505         if (fec_mode)
2506                 *fec_mode = mac->fec_mode;
2507 }
2508
2509 static int hclge_mac_init(struct hclge_dev *hdev)
2510 {
2511         struct hclge_mac *mac = &hdev->hw.mac;
2512         int ret;
2513
2514         hdev->support_sfp_query = true;
2515         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2516         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2517                                          hdev->hw.mac.duplex);
2518         if (ret) {
2519                 dev_err(&hdev->pdev->dev,
2520                         "Config mac speed dup fail ret=%d\n", ret);
2521                 return ret;
2522         }
2523
2524         if (hdev->hw.mac.support_autoneg) {
2525                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2526                 if (ret) {
2527                         dev_err(&hdev->pdev->dev,
2528                                 "Config mac autoneg fail ret=%d\n", ret);
2529                         return ret;
2530                 }
2531         }
2532
2533         mac->link = 0;
2534
2535         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2536                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2537                 if (ret) {
2538                         dev_err(&hdev->pdev->dev,
2539                                 "Fec mode init fail, ret = %d\n", ret);
2540                         return ret;
2541                 }
2542         }
2543
2544         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2545         if (ret) {
2546                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2547                 return ret;
2548         }
2549
2550         ret = hclge_buffer_alloc(hdev);
2551         if (ret)
2552                 dev_err(&hdev->pdev->dev,
2553                         "allocate buffer fail, ret=%d\n", ret);
2554
2555         return ret;
2556 }
2557
2558 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2559 {
2560         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2561             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2562                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2563                               &hdev->mbx_service_task);
2564 }
2565
2566 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2567 {
2568         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2569             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2570                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2571                               &hdev->rst_service_task);
2572 }
2573
2574 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2575 {
2576         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2577             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2578             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2579                 hdev->hw_stats.stats_timer++;
2580                 hdev->fd_arfs_expire_timer++;
2581                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2582                                     system_wq, &hdev->service_task,
2583                                     delay_time);
2584         }
2585 }
2586
2587 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2588 {
2589         struct hclge_link_status_cmd *req;
2590         struct hclge_desc desc;
2591         int link_status;
2592         int ret;
2593
2594         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2595         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2596         if (ret) {
2597                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2598                         ret);
2599                 return ret;
2600         }
2601
2602         req = (struct hclge_link_status_cmd *)desc.data;
2603         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2604
2605         return !!link_status;
2606 }
2607
2608 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2609 {
2610         unsigned int mac_state;
2611         int link_stat;
2612
2613         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2614                 return 0;
2615
2616         mac_state = hclge_get_mac_link_status(hdev);
2617
2618         if (hdev->hw.mac.phydev) {
2619                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2620                         link_stat = mac_state &
2621                                 hdev->hw.mac.phydev->link;
2622                 else
2623                         link_stat = 0;
2624
2625         } else {
2626                 link_stat = mac_state;
2627         }
2628
2629         return !!link_stat;
2630 }
2631
2632 static void hclge_update_link_status(struct hclge_dev *hdev)
2633 {
2634         struct hnae3_client *rclient = hdev->roce_client;
2635         struct hnae3_client *client = hdev->nic_client;
2636         struct hnae3_handle *rhandle;
2637         struct hnae3_handle *handle;
2638         int state;
2639         int i;
2640
2641         if (!client)
2642                 return;
2643         state = hclge_get_mac_phy_link(hdev);
2644         if (state != hdev->hw.mac.link) {
2645                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2646                         handle = &hdev->vport[i].nic;
2647                         client->ops->link_status_change(handle, state);
2648                         hclge_config_mac_tnl_int(hdev, state);
2649                         rhandle = &hdev->vport[i].roce;
2650                         if (rclient && rclient->ops->link_status_change)
2651                                 rclient->ops->link_status_change(rhandle,
2652                                                                  state);
2653                 }
2654                 hdev->hw.mac.link = state;
2655         }
2656 }
2657
2658 static void hclge_update_port_capability(struct hclge_mac *mac)
2659 {
2660         /* update fec ability by speed */
2661         hclge_convert_setting_fec(mac);
2662
2663         /* firmware cannot identify the backplane type; the media type
2664          * read from the configuration helps to handle it
2665          */
2666         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2667             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2668                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2669         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2670                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2671
2672         if (mac->support_autoneg) {
2673                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2674                 linkmode_copy(mac->advertising, mac->supported);
2675         } else {
2676                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2677                                    mac->supported);
2678                 linkmode_zero(mac->advertising);
2679         }
2680 }
2681
2682 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2683 {
2684         struct hclge_sfp_info_cmd *resp;
2685         struct hclge_desc desc;
2686         int ret;
2687
2688         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2689         resp = (struct hclge_sfp_info_cmd *)desc.data;
2690         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2691         if (ret == -EOPNOTSUPP) {
2692                 dev_warn(&hdev->pdev->dev,
2693                          "IMP does not support getting SFP speed %d\n", ret);
2694                 return ret;
2695         } else if (ret) {
2696                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2697                 return ret;
2698         }
2699
2700         *speed = le32_to_cpu(resp->speed);
2701
2702         return 0;
2703 }
2704
2705 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2706 {
2707         struct hclge_sfp_info_cmd *resp;
2708         struct hclge_desc desc;
2709         int ret;
2710
2711         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2712         resp = (struct hclge_sfp_info_cmd *)desc.data;
2713
2714         resp->query_type = QUERY_ACTIVE_SPEED;
2715
2716         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2717         if (ret == -EOPNOTSUPP) {
2718                 dev_warn(&hdev->pdev->dev,
2719                          "IMP does not support getting SFP info %d\n", ret);
2720                 return ret;
2721         } else if (ret) {
2722                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2723                 return ret;
2724         }
2725
2726         mac->speed = le32_to_cpu(resp->speed);
2727         /* if resp->speed_ability is 0, the firmware is an old version,
2728          * so do not update these params
2729          */
2730         if (resp->speed_ability) {
2731                 mac->module_type = le32_to_cpu(resp->module_type);
2732                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2733                 mac->autoneg = resp->autoneg;
2734                 mac->support_autoneg = resp->autoneg_ability;
2735                 mac->speed_type = QUERY_ACTIVE_SPEED;
2736                 if (!resp->active_fec)
2737                         mac->fec_mode = 0;
2738                 else
2739                         mac->fec_mode = BIT(resp->active_fec);
2740         } else {
2741                 mac->speed_type = QUERY_SFP_SPEED;
2742         }
2743
2744         return 0;
2745 }
2746
2747 static int hclge_update_port_info(struct hclge_dev *hdev)
2748 {
2749         struct hclge_mac *mac = &hdev->hw.mac;
2750         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2751         int ret;
2752
2753         /* get the port info from SFP cmd if not copper port */
2754         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2755                 return 0;
2756
2757         /* if IMP does not support getting SFP/qSFP info, return directly */
2758         if (!hdev->support_sfp_query)
2759                 return 0;
2760
2761         if (hdev->pdev->revision >= 0x21)
2762                 ret = hclge_get_sfp_info(hdev, mac);
2763         else
2764                 ret = hclge_get_sfp_speed(hdev, &speed);
2765
2766         if (ret == -EOPNOTSUPP) {
2767                 hdev->support_sfp_query = false;
2768                 return ret;
2769         } else if (ret) {
2770                 return ret;
2771         }
2772
2773         if (hdev->pdev->revision >= 0x21) {
2774                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2775                         hclge_update_port_capability(mac);
2776                         return 0;
2777                 }
2778                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2779                                                HCLGE_MAC_FULL);
2780         } else {
2781                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2782                         return 0; /* do nothing if no SFP */
2783
2784                 /* must config full duplex for SFP */
2785                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2786         }
2787 }
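
/* Summary of the dispatch above: on HW revision >= 0x21 the full SFP info
 * query is used and, when the firmware reports an active speed
 * (speed_type == QUERY_ACTIVE_SPEED), the port capability is refreshed from
 * that data; otherwise the MAC is configured to the reported speed at full
 * duplex. On older revisions only the SFP speed is queried and, if an SFP
 * is present, the MAC is forced to that speed at full duplex.
 */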
2788
2789 static int hclge_get_status(struct hnae3_handle *handle)
2790 {
2791         struct hclge_vport *vport = hclge_get_vport(handle);
2792         struct hclge_dev *hdev = vport->back;
2793
2794         hclge_update_link_status(hdev);
2795
2796         return hdev->hw.mac.link;
2797 }
2798
2799 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2800 {
2801         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2802
2803         /* fetch the events from their corresponding regs */
2804         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2805         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2806         msix_src_reg = hclge_read_dev(&hdev->hw,
2807                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2808
2809         /* Assumption: If by any chance reset and mailbox events are reported
2810          * together, then we will only process the reset event in this pass and
2811          * will defer processing of the mailbox events. Since we have not
2812          * cleared the RX CMDQ event this time, we will receive another
2813          * interrupt from H/W just for the mailbox.
2814          *
2815          * check for vector0 reset event sources
2816          */
2817         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2818                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2819                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2820                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2821                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2822                 hdev->rst_stats.imp_rst_cnt++;
2823                 return HCLGE_VECTOR0_EVENT_RST;
2824         }
2825
2826         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2827                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2828                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2829                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2830                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2831                 hdev->rst_stats.global_rst_cnt++;
2832                 return HCLGE_VECTOR0_EVENT_RST;
2833         }
2834
2835         /* check for vector0 msix event source */
2836         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2837                 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2838                          msix_src_reg);
2839                 *clearval = msix_src_reg;
2840                 return HCLGE_VECTOR0_EVENT_ERR;
2841         }
2842
2843         /* check for vector0 mailbox(=CMDQ RX) event source */
2844         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2845                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2846                 *clearval = cmdq_src_reg;
2847                 return HCLGE_VECTOR0_EVENT_MBX;
2848         }
2849
2850         /* print other vector0 event source */
2851         dev_info(&hdev->pdev->dev,
2852                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
2853                  cmdq_src_reg, msix_src_reg);
2854         *clearval = msix_src_reg;
2855
2856         return HCLGE_VECTOR0_EVENT_OTHER;
2857 }
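
/* The checks in hclge_check_event_cause() are ordered by priority: IMP
 * reset, then global reset, then MSI-X (hardware error) events, then
 * mailbox (CMDQ RX) events, and finally anything else. Only one event type
 * is returned per call; the corresponding source bits are handed back
 * through *clearval so the caller can acknowledge exactly what it consumed.
 */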
2858
2859 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2860                                     u32 regclr)
2861 {
2862         switch (event_type) {
2863         case HCLGE_VECTOR0_EVENT_RST:
2864                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2865                 break;
2866         case HCLGE_VECTOR0_EVENT_MBX:
2867                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2868                 break;
2869         default:
2870                 break;
2871         }
2872 }
2873
2874 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2875 {
2876         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2877                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2878                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2879                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2880         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2881 }
2882
2883 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2884 {
2885         writel(enable ? 1 : 0, vector->addr);
2886 }
2887
2888 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2889 {
2890         struct hclge_dev *hdev = data;
2891         u32 clearval = 0;
2892         u32 event_cause;
2893
2894         hclge_enable_vector(&hdev->misc_vector, false);
2895         event_cause = hclge_check_event_cause(hdev, &clearval);
2896
2897         /* vector 0 interrupt is shared with reset and mailbox source events. */
2898         switch (event_cause) {
2899         case HCLGE_VECTOR0_EVENT_ERR:
2900                 /* we do not know what type of reset is required now. This can
2901                  * only be decided after we fetch the type of errors which
2902                  * caused this event. Therefore, we will do the following for now:
2903                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2904                  *    have deferred the choice of the reset type to be used.
2905                  * 2. Schedule the reset service task.
2906                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2907                  *    will fetch the correct type of reset. This is done by
2908                  *    first decoding the types of errors.
2909                  */
2910                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2911                 /* fall through */
2912         case HCLGE_VECTOR0_EVENT_RST:
2913                 hclge_reset_task_schedule(hdev);
2914                 break;
2915         case HCLGE_VECTOR0_EVENT_MBX:
2916                 /* If we are here then either:
2917                  * 1. we are not handling any mbx task and none is
2918                  *    scheduled either,
2919                  *                        OR
2920                  * 2. we could be handling an mbx task but nothing more is
2921                  *    scheduled.
2922                  * In both cases, we should schedule the mbx task as there are
2923                  * more mbx messages reported by this interrupt.
2924                  */
2925                 hclge_mbx_task_schedule(hdev);
2926                 break;
2927         default:
2928                 dev_warn(&hdev->pdev->dev,
2929                          "received unknown or unhandled event of vector0\n");
2930                 break;
2931         }
2932
2933         hclge_clear_event_cause(hdev, event_cause, clearval);
2934
2935         /* Enable the interrupt if it was not caused by reset. And when
2936          * clearval equals 0, the interrupt status may have been
2937          * cleared by hardware before the driver read the status register.
2938          * In this case, the vector0 interrupt should also be enabled.
2939          */
2940         if (!clearval ||
2941             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2942                 hclge_enable_vector(&hdev->misc_vector, true);
2943         }
2944
2945         return IRQ_HANDLED;
2946 }
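
/* The misc IRQ handler above follows a disable -> decode -> schedule ->
 * clear -> conditionally re-enable pattern: vector 0 is masked on entry,
 * the event cause is decoded, the heavy work (reset or mailbox handling) is
 * deferred to the service tasks, the consumed source bits are cleared, and
 * the vector is re-enabled here only for mailbox events or when nothing was
 * latched; reset paths re-enable it later (see hclge_clear_reset_cause()).
 */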
2947
2948 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2949 {
2950         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2951                 dev_warn(&hdev->pdev->dev,
2952                          "vector(vector_id %d) has been freed.\n", vector_id);
2953                 return;
2954         }
2955
2956         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2957         hdev->num_msi_left += 1;
2958         hdev->num_msi_used -= 1;
2959 }
2960
2961 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2962 {
2963         struct hclge_misc_vector *vector = &hdev->misc_vector;
2964
2965         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2966
2967         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2968         hdev->vector_status[0] = 0;
2969
2970         hdev->num_msi_left -= 1;
2971         hdev->num_msi_used += 1;
2972 }
2973
2974 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
2975                                       const cpumask_t *mask)
2976 {
2977         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
2978                                               affinity_notify);
2979
2980         cpumask_copy(&hdev->affinity_mask, mask);
2981 }
2982
2983 static void hclge_irq_affinity_release(struct kref *ref)
2984 {
2985 }
2986
2987 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
2988 {
2989         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
2990                               &hdev->affinity_mask);
2991
2992         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
2993         hdev->affinity_notify.release = hclge_irq_affinity_release;
2994         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
2995                                   &hdev->affinity_notify);
2996 }
2997
2998 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
2999 {
3000         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3001         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3002 }
3003
3004 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3005 {
3006         int ret;
3007
3008         hclge_get_misc_vector(hdev);
3009
3010         /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
3011         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3012                           0, "hclge_misc", hdev);
3013         if (ret) {
3014                 hclge_free_vector(hdev, 0);
3015                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3016                         hdev->misc_vector.vector_irq);
3017         }
3018
3019         return ret;
3020 }
3021
3022 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3023 {
3024         free_irq(hdev->misc_vector.vector_irq, hdev);
3025         hclge_free_vector(hdev, 0);
3026 }
3027
3028 int hclge_notify_client(struct hclge_dev *hdev,
3029                         enum hnae3_reset_notify_type type)
3030 {
3031         struct hnae3_client *client = hdev->nic_client;
3032         u16 i;
3033
3034         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3035                 return 0;
3036
3037         if (!client->ops->reset_notify)
3038                 return -EOPNOTSUPP;
3039
3040         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3041                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3042                 int ret;
3043
3044                 ret = client->ops->reset_notify(handle, type);
3045                 if (ret) {
3046                         dev_err(&hdev->pdev->dev,
3047                                 "notify nic client failed %d(%d)\n", type, ret);
3048                         return ret;
3049                 }
3050         }
3051
3052         return 0;
3053 }
3054
3055 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3056                                     enum hnae3_reset_notify_type type)
3057 {
3058         struct hnae3_client *client = hdev->roce_client;
3059         int ret = 0;
3060         u16 i;
3061
3062         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3063                 return 0;
3064
3065         if (!client->ops->reset_notify)
3066                 return -EOPNOTSUPP;
3067
3068         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3069                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3070
3071                 ret = client->ops->reset_notify(handle, type);
3072                 if (ret) {
3073                         dev_err(&hdev->pdev->dev,
3074                                 "notify roce client failed %d(%d)\n",
3075                                 type, ret);
3076                         return ret;
3077                 }
3078         }
3079
3080         return ret;
3081 }
3082
3083 static int hclge_reset_wait(struct hclge_dev *hdev)
3084 {
3085 #define HCLGE_RESET_WAIT_MS     100
3086 #define HCLGE_RESET_WAIT_CNT    200
3087         u32 val, reg, reg_bit;
3088         u32 cnt = 0;
3089
3090         switch (hdev->reset_type) {
3091         case HNAE3_IMP_RESET:
3092                 reg = HCLGE_GLOBAL_RESET_REG;
3093                 reg_bit = HCLGE_IMP_RESET_BIT;
3094                 break;
3095         case HNAE3_GLOBAL_RESET:
3096                 reg = HCLGE_GLOBAL_RESET_REG;
3097                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3098                 break;
3099         case HNAE3_FUNC_RESET:
3100                 reg = HCLGE_FUN_RST_ING;
3101                 reg_bit = HCLGE_FUN_RST_ING_B;
3102                 break;
3103         case HNAE3_FLR_RESET:
3104                 break;
3105         default:
3106                 dev_err(&hdev->pdev->dev,
3107                         "Wait for unsupported reset type: %d\n",
3108                         hdev->reset_type);
3109                 return -EINVAL;
3110         }
3111
3112         if (hdev->reset_type == HNAE3_FLR_RESET) {
3113                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3114                        cnt++ < HCLGE_RESET_WAIT_CNT)
3115                         msleep(HCLGE_RESET_WAIT_MS);
3116
3117                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3118                         dev_err(&hdev->pdev->dev,
3119                                 "flr wait timeout: %d\n", cnt);
3120                         return -EBUSY;
3121                 }
3122
3123                 return 0;
3124         }
3125
3126         val = hclge_read_dev(&hdev->hw, reg);
3127         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3128                 msleep(HCLGE_RESET_WAIT_MS);
3129                 val = hclge_read_dev(&hdev->hw, reg);
3130                 cnt++;
3131         }
3132
3133         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3134                 dev_warn(&hdev->pdev->dev,
3135                          "Wait for reset timeout: %d\n", hdev->reset_type);
3136                 return -EBUSY;
3137         }
3138
3139         return 0;
3140 }
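
/* hclge_reset_wait() polls the relevant reset status in 100 ms steps for up
 * to 200 iterations, i.e. roughly 20 seconds, before declaring the reset
 * timed out; the FLR path waits on the HNAE3_FLR_DONE bit with the same
 * bound instead of reading a hardware register.
 */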
3141
3142 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3143 {
3144         struct hclge_vf_rst_cmd *req;
3145         struct hclge_desc desc;
3146
3147         req = (struct hclge_vf_rst_cmd *)desc.data;
3148         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3149         req->dest_vfid = func_id;
3150
3151         if (reset)
3152                 req->vf_rst = 0x1;
3153
3154         return hclge_cmd_send(&hdev->hw, &desc, 1);
3155 }
3156
3157 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3158 {
3159         int i;
3160
3161         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3162                 struct hclge_vport *vport = &hdev->vport[i];
3163                 int ret;
3164
3165                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3166                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3167                 if (ret) {
3168                         dev_err(&hdev->pdev->dev,
3169                                 "set vf(%d) rst failed %d!\n",
3170                                 vport->vport_id, ret);
3171                         return ret;
3172                 }
3173
3174                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3175                         continue;
3176
3177                 /* Inform VF to process the reset.
3178                  * hclge_inform_reset_assert_to_vf may fail if VF
3179                  * driver is not loaded.
3180                  */
3181                 ret = hclge_inform_reset_assert_to_vf(vport);
3182                 if (ret)
3183                         dev_warn(&hdev->pdev->dev,
3184                                  "inform reset to vf(%d) failed %d!\n",
3185                                  vport->vport_id, ret);
3186         }
3187
3188         return 0;
3189 }
3190
3191 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3192 {
3193         struct hclge_pf_rst_sync_cmd *req;
3194         struct hclge_desc desc;
3195         int cnt = 0;
3196         int ret;
3197
3198         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3199         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3200
3201         do {
3202                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3203                 /* for compatibility with old firmware, wait
3204                  * 100 ms for the VF to stop IO
3205                  */
3206                 if (ret == -EOPNOTSUPP) {
3207                         msleep(HCLGE_RESET_SYNC_TIME);
3208                         return 0;
3209                 } else if (ret) {
3210                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3211                                 ret);
3212                         return ret;
3213                 } else if (req->all_vf_ready) {
3214                         return 0;
3215                 }
3216                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3217                 hclge_cmd_reuse_desc(&desc, true);
3218         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3219
3220         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3221         return -ETIME;
3222 }
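
/* hclge_func_reset_sync_vf() retries the HCLGE_OPC_QUERY_VF_RST_RDY query
 * every HCLGE_PF_RESET_SYNC_TIME (20 ms) for up to HCLGE_PF_RESET_SYNC_CNT
 * (1500) attempts, i.e. about 30 seconds, before giving up with -ETIME.
 * Firmware that does not implement the query returns -EOPNOTSUPP, in which
 * case a single HCLGE_RESET_SYNC_TIME (100 ms) sleep is used instead.
 */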
3223
3224 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3225 {
3226         struct hclge_desc desc;
3227         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3228         int ret;
3229
3230         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3231         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3232         req->fun_reset_vfid = func_id;
3233
3234         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3235         if (ret)
3236                 dev_err(&hdev->pdev->dev,
3237                         "send function reset cmd fail, status =%d\n", ret);
3238
3239         return ret;
3240 }
3241
3242 static void hclge_do_reset(struct hclge_dev *hdev)
3243 {
3244         struct hnae3_handle *handle = &hdev->vport[0].nic;
3245         struct pci_dev *pdev = hdev->pdev;
3246         u32 val;
3247
3248         if (hclge_get_hw_reset_stat(handle)) {
3249                 dev_info(&pdev->dev, "Hardware reset has not finished\n");
3250                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3251                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3252                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3253                 return;
3254         }
3255
3256         switch (hdev->reset_type) {
3257         case HNAE3_GLOBAL_RESET:
3258                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3259                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3260                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3261                 dev_info(&pdev->dev, "Global Reset requested\n");
3262                 break;
3263         case HNAE3_FUNC_RESET:
3264                 dev_info(&pdev->dev, "PF Reset requested\n");
3265                 /* schedule again to check later */
3266                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3267                 hclge_reset_task_schedule(hdev);
3268                 break;
3269         case HNAE3_FLR_RESET:
3270                 dev_info(&pdev->dev, "FLR requested\n");
3271                 /* schedule again to check later */
3272                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3273                 hclge_reset_task_schedule(hdev);
3274                 break;
3275         default:
3276                 dev_warn(&pdev->dev,
3277                          "Unsupported reset type: %d\n", hdev->reset_type);
3278                 break;
3279         }
3280 }
3281
3282 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3283                                                    unsigned long *addr)
3284 {
3285         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3286         struct hclge_dev *hdev = ae_dev->priv;
3287
3288         /* first, resolve any unknown reset type to the known type(s) */
3289         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3290                 /* we will intentionally ignore any errors from this function
3291                  * as we will end up in *some* reset request in any case
3292                  */
3293                 hclge_handle_hw_msix_error(hdev, addr);
3294                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3295                 /* We deferred the clearing of the error event which caused the
3296                  * interrupt since it was not possible to do that in
3297                  * interrupt context (and this is the reason we introduced the
3298                  * new UNKNOWN reset type). Now that the errors have been
3299                  * handled and cleared in hardware, we can safely enable
3300                  * interrupts. This is an exception to the norm.
3301                  */
3302                 hclge_enable_vector(&hdev->misc_vector, true);
3303         }
3304
3305         /* return the highest priority reset level amongst all */
3306         if (test_bit(HNAE3_IMP_RESET, addr)) {
3307                 rst_level = HNAE3_IMP_RESET;
3308                 clear_bit(HNAE3_IMP_RESET, addr);
3309                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3310                 clear_bit(HNAE3_FUNC_RESET, addr);
3311         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3312                 rst_level = HNAE3_GLOBAL_RESET;
3313                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3314                 clear_bit(HNAE3_FUNC_RESET, addr);
3315         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3316                 rst_level = HNAE3_FUNC_RESET;
3317                 clear_bit(HNAE3_FUNC_RESET, addr);
3318         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3319                 rst_level = HNAE3_FLR_RESET;
3320                 clear_bit(HNAE3_FLR_RESET, addr);
3321         }
3322
3323         if (hdev->reset_type != HNAE3_NONE_RESET &&
3324             rst_level < hdev->reset_type)
3325                 return HNAE3_NONE_RESET;
3326
3327         return rst_level;
3328 }
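
/* Reset levels are resolved in strict priority order: IMP > global > func >
 * FLR. When a higher level is selected, the lower-level pending bits are
 * cleared as well, since the bigger reset supersedes them. If a reset of a
 * higher level than the one requested is already in progress
 * (hdev->reset_type), the request is reported as HNAE3_NONE_RESET.
 */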
3329
3330 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3331 {
3332         u32 clearval = 0;
3333
3334         switch (hdev->reset_type) {
3335         case HNAE3_IMP_RESET:
3336                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3337                 break;
3338         case HNAE3_GLOBAL_RESET:
3339                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3340                 break;
3341         default:
3342                 break;
3343         }
3344
3345         if (!clearval)
3346                 return;
3347
3348         /* For revision 0x20, the reset interrupt source
3349          * can only be cleared after the hardware reset is done
3350          */
3351         if (hdev->pdev->revision == 0x20)
3352                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3353                                 clearval);
3354
3355         hclge_enable_vector(&hdev->misc_vector, true);
3356 }
3357
3358 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3359 {
3360         int ret = 0;
3361
3362         switch (hdev->reset_type) {
3363         case HNAE3_FUNC_RESET:
3364                 /* fall through */
3365         case HNAE3_FLR_RESET:
3366                 ret = hclge_set_all_vf_rst(hdev, true);
3367                 break;
3368         default:
3369                 break;
3370         }
3371
3372         return ret;
3373 }
3374
3375 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3376 {
3377         u32 reg_val;
3378
3379         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3380         if (enable)
3381                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3382         else
3383                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3384
3385         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3386 }
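
/* The "handshake" above toggles the HCLGE_NIC_SW_RST_RDY bit in the CSQ
 * depth register: setting it tells the hardware that the driver has
 * finished its preparatory work and the reset may proceed; it is cleared
 * again once re-initialization is complete (see hclge_reset_prepare_up()).
 */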
3387
3388 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3389 {
3390         u32 reg_val;
3391         int ret = 0;
3392
3393         switch (hdev->reset_type) {
3394         case HNAE3_FUNC_RESET:
3395                 /* to confirm whether all running VFs are ready
3396                  * before requesting PF reset
3397                  */
3398                 ret = hclge_func_reset_sync_vf(hdev);
3399                 if (ret)
3400                         return ret;
3401
3402                 ret = hclge_func_reset_cmd(hdev, 0);
3403                 if (ret) {
3404                         dev_err(&hdev->pdev->dev,
3405                                 "asserting function reset fail %d!\n", ret);
3406                         return ret;
3407                 }
3408
3409                 /* After performing PF reset, it is not necessary to do the
3410                  * mailbox handling or send any command to firmware, because
3411                  * any mailbox handling or command to firmware is only valid
3412                  * after hclge_cmd_init is called.
3413                  */
3414                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3415                 hdev->rst_stats.pf_rst_cnt++;
3416                 break;
3417         case HNAE3_FLR_RESET:
3418                 /* to confirm whether all running VFs are ready
3419                  * before requesting PF reset
3420                  */
3421                 ret = hclge_func_reset_sync_vf(hdev);
3422                 if (ret)
3423                         return ret;
3424
3425                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3426                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3427                 hdev->rst_stats.flr_rst_cnt++;
3428                 break;
3429         case HNAE3_IMP_RESET:
3430                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3431                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3432                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3433                 break;
3434         default:
3435                 break;
3436         }
3437
3438         /* inform hardware that preparatory work is done */
3439         msleep(HCLGE_RESET_SYNC_TIME);
3440         hclge_reset_handshake(hdev, true);
3441         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3442
3443         return ret;
3444 }
3445
3446 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3447 {
3448 #define MAX_RESET_FAIL_CNT 5
3449
3450         if (hdev->reset_pending) {
3451                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3452                          hdev->reset_pending);
3453                 return true;
3454         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3455                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3456                     BIT(HCLGE_IMP_RESET_BIT))) {
3457                 dev_info(&hdev->pdev->dev,
3458                          "reset failed because IMP Reset is pending\n");
3459                 hclge_clear_reset_cause(hdev);
3460                 return false;
3461         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3462                 hdev->reset_fail_cnt++;
3463                 set_bit(hdev->reset_type, &hdev->reset_pending);
3464                 dev_info(&hdev->pdev->dev,
3465                          "re-schedule reset task(%d)\n",
3466                          hdev->reset_fail_cnt);
3467                 return true;
3468         }
3469
3470         hclge_clear_reset_cause(hdev);
3471
3472         /* recover the handshake status when reset fail */
3473         hclge_reset_handshake(hdev, true);
3474
3475         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3476         return false;
3477 }
3478
3479 static int hclge_set_rst_done(struct hclge_dev *hdev)
3480 {
3481         struct hclge_pf_rst_done_cmd *req;
3482         struct hclge_desc desc;
3483
3484         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3485         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3486         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3487
3488         return hclge_cmd_send(&hdev->hw, &desc, 1);
3489 }
3490
3491 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3492 {
3493         int ret = 0;
3494
3495         switch (hdev->reset_type) {
3496         case HNAE3_FUNC_RESET:
3497                 /* fall through */
3498         case HNAE3_FLR_RESET:
3499                 ret = hclge_set_all_vf_rst(hdev, false);
3500                 break;
3501         case HNAE3_GLOBAL_RESET:
3502                 /* fall through */
3503         case HNAE3_IMP_RESET:
3504                 ret = hclge_set_rst_done(hdev);
3505                 break;
3506         default:
3507                 break;
3508         }
3509
3510         /* clear up the handshake status after re-initialize done */
3511         hclge_reset_handshake(hdev, false);
3512
3513         return ret;
3514 }
3515
3516 static int hclge_reset_stack(struct hclge_dev *hdev)
3517 {
3518         int ret;
3519
3520         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3521         if (ret)
3522                 return ret;
3523
3524         ret = hclge_reset_ae_dev(hdev->ae_dev);
3525         if (ret)
3526                 return ret;
3527
3528         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3529         if (ret)
3530                 return ret;
3531
3532         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3533 }
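
/* hclge_reset_stack() re-initializes the software state in a fixed order:
 * notify the NIC client to uninit, rebuild the ae device via
 * hclge_reset_ae_dev(), notify the client to init again, and finally ask it
 * to restore its runtime configuration (HNAE3_RESTORE_CLIENT).
 */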
3534
3535 static void hclge_reset(struct hclge_dev *hdev)
3536 {
3537         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3538         int ret;
3539
3540         /* Initialize ae_dev reset status as well, in case enet layer wants to
3541          * know if device is undergoing reset
3542          */
3543         ae_dev->reset_type = hdev->reset_type;
3544         hdev->rst_stats.reset_cnt++;
3545         /* perform reset of the stack & ae device for a client */
3546         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3547         if (ret)
3548                 goto err_reset;
3549
3550         ret = hclge_reset_prepare_down(hdev);
3551         if (ret)
3552                 goto err_reset;
3553
3554         rtnl_lock();
3555         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3556         if (ret)
3557                 goto err_reset_lock;
3558
3559         rtnl_unlock();
3560
3561         ret = hclge_reset_prepare_wait(hdev);
3562         if (ret)
3563                 goto err_reset;
3564
3565         if (hclge_reset_wait(hdev))
3566                 goto err_reset;
3567
3568         hdev->rst_stats.hw_reset_done_cnt++;
3569
3570         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3571         if (ret)
3572                 goto err_reset;
3573
3574         rtnl_lock();
3575
3576         ret = hclge_reset_stack(hdev);
3577         if (ret)
3578                 goto err_reset_lock;
3579
3580         hclge_clear_reset_cause(hdev);
3581
3582         ret = hclge_reset_prepare_up(hdev);
3583         if (ret)
3584                 goto err_reset_lock;
3585
3586         rtnl_unlock();
3587
3588         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3589         /* ignore the RoCE notify error if the reset has already failed
3590          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3591          */
3592         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3593                 goto err_reset;
3594
3595         rtnl_lock();
3596
3597         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3598         if (ret)
3599                 goto err_reset_lock;
3600
3601         rtnl_unlock();
3602
3603         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3604         if (ret)
3605                 goto err_reset;
3606
3607         hdev->last_reset_time = jiffies;
3608         hdev->reset_fail_cnt = 0;
3609         hdev->rst_stats.reset_done_cnt++;
3610         ae_dev->reset_type = HNAE3_NONE_RESET;
3611
3612         /* if default_reset_request has a higher level reset request,
3613          * it should be handled as soon as possible, since some errors
3614          * can only be fixed by this kind of reset.
3615          */
3616         hdev->reset_level = hclge_get_reset_level(ae_dev,
3617                                                   &hdev->default_reset_request);
3618         if (hdev->reset_level != HNAE3_NONE_RESET)
3619                 set_bit(hdev->reset_level, &hdev->reset_request);
3620
3621         return;
3622
3623 err_reset_lock:
3624         rtnl_unlock();
3625 err_reset:
3626         if (hclge_reset_err_handle(hdev))
3627                 hclge_reset_task_schedule(hdev);
3628 }
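
/* Overall flow of hclge_reset(): bring the RoCE and NIC clients down,
 * prepare and assert the reset, wait for the hardware to finish via
 * hclge_reset_wait(), uninit the RoCE client, rebuild the stack under
 * rtnl_lock, clear the reset cause, release the VFs / signal reset done,
 * and bring the clients back up. Any failure funnels into
 * hclge_reset_err_handle(), which decides whether to re-schedule the reset.
 */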
3629
3630 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3631 {
3632         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3633         struct hclge_dev *hdev = ae_dev->priv;
3634
3635         /* We might end up getting called broadly because of the 2 cases below:
3636          * 1. A recoverable error was conveyed through APEI and the only way to
3637          *    restore normalcy is to reset.
3638          * 2. A new reset request from the stack due to timeout
3639          *
3640          * For the first case, the error event might not have an ae handle
3641          * available. Check if this is a new reset request and we are not here
3642          * just because the last reset attempt did not succeed and the watchdog
3643          * hit us again. We will know this if the last reset request did not
3644          * occur very recently (watchdog timer = 5*HZ, so let us check after a
3645          * sufficiently large time, say 4*5*HZ). In case of a new request we
3646          * reset the "reset level" to PF reset. And if it is a repeat of the
3647          * most recent reset request, we want to make sure we throttle the
3648          * reset request; therefore, we will not allow it again before 3*HZ.
3649          */
3650         if (!handle)
3651                 handle = &hdev->vport[0].nic;
3652
3653         if (time_before(jiffies, (hdev->last_reset_time +
3654                                   HCLGE_RESET_INTERVAL))) {
3655                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3656                 return;
3657         } else if (hdev->default_reset_request)
3658                 hdev->reset_level =
3659                         hclge_get_reset_level(ae_dev,
3660                                               &hdev->default_reset_request);
3661         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3662                 hdev->reset_level = HNAE3_FUNC_RESET;
3663
3664         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3665                  hdev->reset_level);
3666
3667         /* request reset & schedule reset task */
3668         set_bit(hdev->reset_level, &hdev->reset_request);
3669         hclge_reset_task_schedule(hdev);
3670
3671         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3672                 hdev->reset_level++;
3673 }
3674
3675 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3676                                         enum hnae3_reset_type rst_type)
3677 {
3678         struct hclge_dev *hdev = ae_dev->priv;
3679
3680         set_bit(rst_type, &hdev->default_reset_request);
3681 }
3682
3683 static void hclge_reset_timer(struct timer_list *t)
3684 {
3685         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3686
3687         /* if default_reset_request has no value, it means that this reset
3688          * request has already been handled, so just return here
3689          */
3690         if (!hdev->default_reset_request)
3691                 return;
3692
3693         dev_info(&hdev->pdev->dev,
3694                  "triggering reset in reset timer\n");
3695         hclge_reset_event(hdev->pdev, NULL);
3696 }
3697
3698 static void hclge_reset_subtask(struct hclge_dev *hdev)
3699 {
3700         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3701
3702         /* check if there is any ongoing reset in the hardware. This status can
3703          * be checked from reset_pending. If there is, we need to wait for the
3704          * hardware to complete the reset.
3705          *    a. If we are able to figure out in reasonable time that the
3706          *       hardware has fully reset, then we can proceed with the driver
3707          *       and client reset.
3708          *    b. else, we can come back later to check this status, so
3709          *       re-schedule now.
3710          */
3711         hdev->last_reset_time = jiffies;
3712         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3713         if (hdev->reset_type != HNAE3_NONE_RESET)
3714                 hclge_reset(hdev);
3715
3716         /* check if we got any *new* reset requests to be honored */
3717         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3718         if (hdev->reset_type != HNAE3_NONE_RESET)
3719                 hclge_do_reset(hdev);
3720
3721         hdev->reset_type = HNAE3_NONE_RESET;
3722 }
3723
3724 static void hclge_reset_service_task(struct work_struct *work)
3725 {
3726         struct hclge_dev *hdev =
3727                 container_of(work, struct hclge_dev, rst_service_task);
3728
3729         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3730                 return;
3731
3732         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3733
3734         hclge_reset_subtask(hdev);
3735
3736         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3737 }
3738
3739 static void hclge_mailbox_service_task(struct work_struct *work)
3740 {
3741         struct hclge_dev *hdev =
3742                 container_of(work, struct hclge_dev, mbx_service_task);
3743
3744         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3745                 return;
3746
3747         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3748
3749         hclge_mbx_handler(hdev);
3750
3751         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3752 }
3753
3754 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3755 {
3756         int i;
3757
3758         /* start from vport 1 because the PF is always alive */
3759         for (i = 1; i < hdev->num_alloc_vport; i++) {
3760                 struct hclge_vport *vport = &hdev->vport[i];
3761
3762                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3763                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3764
3765                 /* If vf is not alive, set to default value */
3766                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3767                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3768         }
3769 }
3770
3771 static void hclge_service_task(struct work_struct *work)
3772 {
3773         struct hclge_dev *hdev =
3774                 container_of(work, struct hclge_dev, service_task.work);
3775
3776         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3777
3778         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3779                 hclge_update_stats_for_all(hdev);
3780                 hdev->hw_stats.stats_timer = 0;
3781         }
3782
3783         hclge_update_port_info(hdev);
3784         hclge_update_link_status(hdev);
3785         hclge_update_vport_alive(hdev);
3786         hclge_sync_vlan_filter(hdev);
3787         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3788                 hclge_rfs_filter_expire(hdev);
3789                 hdev->fd_arfs_expire_timer = 0;
3790         }
3791
3792         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3793 }
3794
3795 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3796 {
3797         /* VF handle has no client */
3798         if (!handle->client)
3799                 return container_of(handle, struct hclge_vport, nic);
3800         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3801                 return container_of(handle, struct hclge_vport, roce);
3802         else
3803                 return container_of(handle, struct hclge_vport, nic);
3804 }
3805
3806 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3807                             struct hnae3_vector_info *vector_info)
3808 {
3809         struct hclge_vport *vport = hclge_get_vport(handle);
3810         struct hnae3_vector_info *vector = vector_info;
3811         struct hclge_dev *hdev = vport->back;
3812         int alloc = 0;
3813         int i, j;
3814
3815         vector_num = min(hdev->num_msi_left, vector_num);
3816
3817         for (j = 0; j < vector_num; j++) {
3818                 for (i = 1; i < hdev->num_msi; i++) {
3819                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3820                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3821                                 vector->io_addr = hdev->hw.io_base +
3822                                         HCLGE_VECTOR_REG_BASE +
3823                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3824                                         vport->vport_id *
3825                                         HCLGE_VECTOR_VF_OFFSET;
3826                                 hdev->vector_status[i] = vport->vport_id;
3827                                 hdev->vector_irq[i] = vector->vector;
3828
3829                                 vector++;
3830                                 alloc++;
3831
3832                                 break;
3833                         }
3834                 }
3835         }
3836         hdev->num_msi_left -= alloc;
3837         hdev->num_msi_used += alloc;
3838
3839         return alloc;
3840 }
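
/* Vector 0 is reserved for the misc interrupt, so the search above starts
 * at index 1. For each allocated vector i the doorbell address is
 * io_base + HCLGE_VECTOR_REG_BASE + (i - 1) * HCLGE_VECTOR_REG_OFFSET +
 * vport_id * HCLGE_VECTOR_VF_OFFSET, and the return value is the number of
 * vectors actually allocated, which may be less than was requested.
 */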
3841
3842 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3843 {
3844         int i;
3845
3846         for (i = 0; i < hdev->num_msi; i++)
3847                 if (vector == hdev->vector_irq[i])
3848                         return i;
3849
3850         return -EINVAL;
3851 }
3852
3853 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3854 {
3855         struct hclge_vport *vport = hclge_get_vport(handle);
3856         struct hclge_dev *hdev = vport->back;
3857         int vector_id;
3858
3859         vector_id = hclge_get_vector_index(hdev, vector);
3860         if (vector_id < 0) {
3861                 dev_err(&hdev->pdev->dev,
3862                         "Get vector index fail. vector_id =%d\n", vector_id);
3863                 return vector_id;
3864         }
3865
3866         hclge_free_vector(hdev, vector_id);
3867
3868         return 0;
3869 }
3870
3871 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3872 {
3873         return HCLGE_RSS_KEY_SIZE;
3874 }
3875
3876 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3877 {
3878         return HCLGE_RSS_IND_TBL_SIZE;
3879 }
3880
3881 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3882                                   const u8 hfunc, const u8 *key)
3883 {
3884         struct hclge_rss_config_cmd *req;
3885         unsigned int key_offset = 0;
3886         struct hclge_desc desc;
3887         int key_counts;
3888         int key_size;
3889         int ret;
3890
3891         key_counts = HCLGE_RSS_KEY_SIZE;
3892         req = (struct hclge_rss_config_cmd *)desc.data;
3893
3894         while (key_counts) {
3895                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3896                                            false);
3897
3898                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3899                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3900
3901                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3902                 memcpy(req->hash_key,
3903                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3904
3905                 key_counts -= key_size;
3906                 key_offset++;
3907                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3908                 if (ret) {
3909                         dev_err(&hdev->pdev->dev,
3910                                 "Configure RSS config fail, status = %d\n",
3911                                 ret);
3912                         return ret;
3913                 }
3914         }
3915         return 0;
3916 }
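
/* The RSS hash key is longer than a single command descriptor can carry, so
 * the loop above programs it in HCLGE_RSS_HASH_KEY_NUM byte chunks, encoding
 * the chunk index into hash_config via HCLGE_RSS_HASH_KEY_OFFSET_B together
 * with the selected hash algorithm.
 */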
3917
3918 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3919 {
3920         struct hclge_rss_indirection_table_cmd *req;
3921         struct hclge_desc desc;
3922         int i, j;
3923         int ret;
3924
3925         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3926
3927         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3928                 hclge_cmd_setup_basic_desc
3929                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3930
3931                 req->start_table_index =
3932                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3933                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3934
3935                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3936                         req->rss_result[j] =
3937                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3938
3939                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3940                 if (ret) {
3941                         dev_err(&hdev->pdev->dev,
3942                                 "Configure rss indir table fail,status = %d\n",
3943                                 ret);
3944                         return ret;
3945                 }
3946         }
3947         return 0;
3948 }
3949
3950 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3951                                  u16 *tc_size, u16 *tc_offset)
3952 {
3953         struct hclge_rss_tc_mode_cmd *req;
3954         struct hclge_desc desc;
3955         int ret;
3956         int i;
3957
3958         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3959         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3960
3961         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3962                 u16 mode = 0;
3963
3964                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3965                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3966                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3967                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3968                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3969
3970                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3971         }
3972
3973         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3974         if (ret)
3975                 dev_err(&hdev->pdev->dev,
3976                         "Configure rss tc mode fail, status = %d\n", ret);
3977
3978         return ret;
3979 }
3980
3981 static void hclge_get_rss_type(struct hclge_vport *vport)
3982 {
3983         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3984             vport->rss_tuple_sets.ipv4_udp_en ||
3985             vport->rss_tuple_sets.ipv4_sctp_en ||
3986             vport->rss_tuple_sets.ipv6_tcp_en ||
3987             vport->rss_tuple_sets.ipv6_udp_en ||
3988             vport->rss_tuple_sets.ipv6_sctp_en)
3989                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3990         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3991                  vport->rss_tuple_sets.ipv6_fragment_en)
3992                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3993         else
3994                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3995 }
3996
3997 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3998 {
3999         struct hclge_rss_input_tuple_cmd *req;
4000         struct hclge_desc desc;
4001         int ret;
4002
4003         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4004
4005         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4006
4007         /* Get the tuple cfg from pf */
4008         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4009         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4010         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4011         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4012         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4013         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4014         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4015         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4016         hclge_get_rss_type(&hdev->vport[0]);
4017         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4018         if (ret)
4019                 dev_err(&hdev->pdev->dev,
4020                         "Configure rss input fail, status = %d\n", ret);
4021         return ret;
4022 }
4023
4024 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4025                          u8 *key, u8 *hfunc)
4026 {
4027         struct hclge_vport *vport = hclge_get_vport(handle);
4028         int i;
4029
4030         /* Get hash algorithm */
4031         if (hfunc) {
4032                 switch (vport->rss_algo) {
4033                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4034                         *hfunc = ETH_RSS_HASH_TOP;
4035                         break;
4036                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4037                         *hfunc = ETH_RSS_HASH_XOR;
4038                         break;
4039                 default:
4040                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4041                         break;
4042                 }
4043         }
4044
4045         /* Get the RSS Key required by the user */
4046         if (key)
4047                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4048
4049         /* Get indirect table */
4050         if (indir)
4051                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4052                         indir[i] =  vport->rss_indirection_tbl[i];
4053
4054         return 0;
4055 }
4056
4057 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4058                          const  u8 *key, const  u8 hfunc)
4059 {
4060         struct hclge_vport *vport = hclge_get_vport(handle);
4061         struct hclge_dev *hdev = vport->back;
4062         u8 hash_algo;
4063         int ret, i;
4064
4065         /* Set the RSS Hash Key if specified by the user */
4066         if (key) {
4067                 switch (hfunc) {
4068                 case ETH_RSS_HASH_TOP:
4069                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4070                         break;
4071                 case ETH_RSS_HASH_XOR:
4072                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4073                         break;
4074                 case ETH_RSS_HASH_NO_CHANGE:
4075                         hash_algo = vport->rss_algo;
4076                         break;
4077                 default:
4078                         return -EINVAL;
4079                 }
4080
4081                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4082                 if (ret)
4083                         return ret;
4084
4085                 /* Update the shadow RSS key with the user specified key */
4086                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4087                 vport->rss_algo = hash_algo;
4088         }
4089
4090         /* Update the shadow RSS table with user specified qids */
4091         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4092                 vport->rss_indirection_tbl[i] = indir[i];
4093
4094         /* Update the hardware */
4095         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4096 }
4097
4098 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4099 {
4100         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4101
4102         if (nfc->data & RXH_L4_B_2_3)
4103                 hash_sets |= HCLGE_D_PORT_BIT;
4104         else
4105                 hash_sets &= ~HCLGE_D_PORT_BIT;
4106
4107         if (nfc->data & RXH_IP_SRC)
4108                 hash_sets |= HCLGE_S_IP_BIT;
4109         else
4110                 hash_sets &= ~HCLGE_S_IP_BIT;
4111
4112         if (nfc->data & RXH_IP_DST)
4113                 hash_sets |= HCLGE_D_IP_BIT;
4114         else
4115                 hash_sets &= ~HCLGE_D_IP_BIT;
4116
4117         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4118                 hash_sets |= HCLGE_V_TAG_BIT;
4119
4120         return hash_sets;
4121 }
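
/* hclge_get_rss_hash_bits() translates the ethtool RXH_* flags into the
 * firmware tuple bits. For example, RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3 maps to HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT, and SCTP flows additionally get HCLGE_V_TAG_BIT set.
 */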
4122
4123 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4124                                struct ethtool_rxnfc *nfc)
4125 {
4126         struct hclge_vport *vport = hclge_get_vport(handle);
4127         struct hclge_dev *hdev = vport->back;
4128         struct hclge_rss_input_tuple_cmd *req;
4129         struct hclge_desc desc;
4130         u8 tuple_sets;
4131         int ret;
4132
4133         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4134                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4135                 return -EINVAL;
4136
4137         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4138         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4139
4140         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4141         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4142         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4143         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4144         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4145         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4146         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4147         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4148
4149         tuple_sets = hclge_get_rss_hash_bits(nfc);
4150         switch (nfc->flow_type) {
4151         case TCP_V4_FLOW:
4152                 req->ipv4_tcp_en = tuple_sets;
4153                 break;
4154         case TCP_V6_FLOW:
4155                 req->ipv6_tcp_en = tuple_sets;
4156                 break;
4157         case UDP_V4_FLOW:
4158                 req->ipv4_udp_en = tuple_sets;
4159                 break;
4160         case UDP_V6_FLOW:
4161                 req->ipv6_udp_en = tuple_sets;
4162                 break;
4163         case SCTP_V4_FLOW:
4164                 req->ipv4_sctp_en = tuple_sets;
4165                 break;
4166         case SCTP_V6_FLOW:
4167                 if ((nfc->data & RXH_L4_B_0_1) ||
4168                     (nfc->data & RXH_L4_B_2_3))
4169                         return -EINVAL;
4170
4171                 req->ipv6_sctp_en = tuple_sets;
4172                 break;
4173         case IPV4_FLOW:
4174                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4175                 break;
4176         case IPV6_FLOW:
4177                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4178                 break;
4179         default:
4180                 return -EINVAL;
4181         }
4182
4183         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4184         if (ret) {
4185                 dev_err(&hdev->pdev->dev,
4186                         "Set rss tuple fail, status = %d\n", ret);
4187                 return ret;
4188         }
4189
4190         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4191         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4192         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4193         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4194         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4195         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4196         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4197         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4198         hclge_get_rss_type(vport);
4199         return 0;
4200 }
4201
4202 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4203                                struct ethtool_rxnfc *nfc)
4204 {
4205         struct hclge_vport *vport = hclge_get_vport(handle);
4206         u8 tuple_sets;
4207
4208         nfc->data = 0;
4209
4210         switch (nfc->flow_type) {
4211         case TCP_V4_FLOW:
4212                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4213                 break;
4214         case UDP_V4_FLOW:
4215                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4216                 break;
4217         case TCP_V6_FLOW:
4218                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4219                 break;
4220         case UDP_V6_FLOW:
4221                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4222                 break;
4223         case SCTP_V4_FLOW:
4224                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4225                 break;
4226         case SCTP_V6_FLOW:
4227                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4228                 break;
4229         case IPV4_FLOW:
4230         case IPV6_FLOW:
4231                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4232                 break;
4233         default:
4234                 return -EINVAL;
4235         }
4236
4237         if (!tuple_sets)
4238                 return 0;
4239
4240         if (tuple_sets & HCLGE_D_PORT_BIT)
4241                 nfc->data |= RXH_L4_B_2_3;
4242         if (tuple_sets & HCLGE_S_PORT_BIT)
4243                 nfc->data |= RXH_L4_B_0_1;
4244         if (tuple_sets & HCLGE_D_IP_BIT)
4245                 nfc->data |= RXH_IP_DST;
4246         if (tuple_sets & HCLGE_S_IP_BIT)
4247                 nfc->data |= RXH_IP_SRC;
4248
4249         return 0;
4250 }
4251
4252 static int hclge_get_tc_size(struct hnae3_handle *handle)
4253 {
4254         struct hclge_vport *vport = hclge_get_vport(handle);
4255         struct hclge_dev *hdev = vport->back;
4256
4257         return hdev->rss_size_max;
4258 }
4259
4260 int hclge_rss_init_hw(struct hclge_dev *hdev)
4261 {
4262         struct hclge_vport *vport = hdev->vport;
4263         u8 *rss_indir = vport[0].rss_indirection_tbl;
4264         u16 rss_size = vport[0].alloc_rss_size;
4265         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4266         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4267         u8 *key = vport[0].rss_hash_key;
4268         u8 hfunc = vport[0].rss_algo;
4269         u16 tc_valid[HCLGE_MAX_TC_NUM];
4270         u16 roundup_size;
4271         unsigned int i;
4272         int ret;
4273
4274         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4275         if (ret)
4276                 return ret;
4277
4278         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4279         if (ret)
4280                 return ret;
4281
4282         ret = hclge_set_rss_input_tuple(hdev);
4283         if (ret)
4284                 return ret;
4285
4286         /* Each TC has the same queue size, and the tc_size set to hardware is
4287          * the log2 of rss_size rounded up to a power of two; the actual queue
4288          * size is limited by the indirection table.
4289          */
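             /* Illustrative example (not in the original source): rss_size = 6
              * gives roundup_pow_of_two(6) = 8 and ilog2(8) = 3, so tc_size = 3
              * is written for each enabled TC below.
              */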
4290         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4291                 dev_err(&hdev->pdev->dev,
4292                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4293                         rss_size);
4294                 return -EINVAL;
4295         }
4296
4297         roundup_size = roundup_pow_of_two(rss_size);
4298         roundup_size = ilog2(roundup_size);
4299
4300         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4301                 tc_valid[i] = 0;
4302
4303                 if (!(hdev->hw_tc_map & BIT(i)))
4304                         continue;
4305
4306                 tc_valid[i] = 1;
4307                 tc_size[i] = roundup_size;
4308                 tc_offset[i] = rss_size * i;
4309         }
4310
4311         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4312 }
4313
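     /* Fill each vport's RSS indirection table round-robin over its allocated
      * RSS queues; e.g. (illustrative) alloc_rss_size = 4 yields 0, 1, 2, 3,
      * 0, 1, 2, 3, ...
      */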
4314 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4315 {
4316         struct hclge_vport *vport = hdev->vport;
4317         int i, j;
4318
4319         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4320                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4321                         vport[j].rss_indirection_tbl[i] =
4322                                 i % vport[j].alloc_rss_size;
4323         }
4324 }
4325
4326 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4327 {
4328         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4329         struct hclge_vport *vport = hdev->vport;
4330
4331         if (hdev->pdev->revision >= 0x21)
4332                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4333
4334         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4335                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4336                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4337                 vport[i].rss_tuple_sets.ipv4_udp_en =
4338                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4339                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4340                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4341                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4342                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4343                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4344                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4345                 vport[i].rss_tuple_sets.ipv6_udp_en =
4346                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4347                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4348                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4349                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4350                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4351
4352                 vport[i].rss_algo = rss_algo;
4353
4354                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4355                        HCLGE_RSS_KEY_SIZE);
4356         }
4357
4358         hclge_rss_indir_init_cfg(hdev);
4359 }
4360
4361 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4362                                 int vector_id, bool en,
4363                                 struct hnae3_ring_chain_node *ring_chain)
4364 {
4365         struct hclge_dev *hdev = vport->back;
4366         struct hnae3_ring_chain_node *node;
4367         struct hclge_desc desc;
4368         struct hclge_ctrl_vector_chain_cmd *req =
4369                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4370         enum hclge_cmd_status status;
4371         enum hclge_opcode_type op;
4372         u16 tqp_type_and_id;
4373         int i;
4374
4375         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4376         hclge_cmd_setup_basic_desc(&desc, op, false);
4377         req->int_vector_id = vector_id;
4378
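             /* Each descriptor holds up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring
              * entries: a full descriptor is sent inside the loop and any
              * remaining entries are flushed after it.
              */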
4379         i = 0;
4380         for (node = ring_chain; node; node = node->next) {
4381                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4382                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4383                                 HCLGE_INT_TYPE_S,
4384                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4385                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4386                                 HCLGE_TQP_ID_S, node->tqp_index);
4387                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4388                                 HCLGE_INT_GL_IDX_S,
4389                                 hnae3_get_field(node->int_gl_idx,
4390                                                 HNAE3_RING_GL_IDX_M,
4391                                                 HNAE3_RING_GL_IDX_S));
4392                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4393                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4394                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4395                         req->vfid = vport->vport_id;
4396
4397                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4398                         if (status) {
4399                                 dev_err(&hdev->pdev->dev,
4400                                         "Map TQP fail, status is %d.\n",
4401                                         status);
4402                                 return -EIO;
4403                         }
4404                         i = 0;
4405
4406                         hclge_cmd_setup_basic_desc(&desc,
4407                                                    op,
4408                                                    false);
4409                         req->int_vector_id = vector_id;
4410                 }
4411         }
4412
4413         if (i > 0) {
4414                 req->int_cause_num = i;
4415                 req->vfid = vport->vport_id;
4416                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4417                 if (status) {
4418                         dev_err(&hdev->pdev->dev,
4419                                 "Map TQP fail, status is %d.\n", status);
4420                         return -EIO;
4421                 }
4422         }
4423
4424         return 0;
4425 }
4426
4427 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4428                                     struct hnae3_ring_chain_node *ring_chain)
4429 {
4430         struct hclge_vport *vport = hclge_get_vport(handle);
4431         struct hclge_dev *hdev = vport->back;
4432         int vector_id;
4433
4434         vector_id = hclge_get_vector_index(hdev, vector);
4435         if (vector_id < 0) {
4436                 dev_err(&hdev->pdev->dev,
4437                         "Get vector index fail. vector_id =%d\n", vector_id);
4438                 return vector_id;
4439         }
4440
4441         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4442 }
4443
4444 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4445                                        struct hnae3_ring_chain_node *ring_chain)
4446 {
4447         struct hclge_vport *vport = hclge_get_vport(handle);
4448         struct hclge_dev *hdev = vport->back;
4449         int vector_id, ret;
4450
4451         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4452                 return 0;
4453
4454         vector_id = hclge_get_vector_index(hdev, vector);
4455         if (vector_id < 0) {
4456                 dev_err(&handle->pdev->dev,
4457                         "Get vector index fail. ret =%d\n", vector_id);
4458                 return vector_id;
4459         }
4460
4461         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4462         if (ret)
4463                 dev_err(&handle->pdev->dev,
4464                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4465                         vector_id, ret);
4466
4467         return ret;
4468 }
4469
4470 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4471                                struct hclge_promisc_param *param)
4472 {
4473         struct hclge_promisc_cfg_cmd *req;
4474         struct hclge_desc desc;
4475         int ret;
4476
4477         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4478
4479         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4480         req->vf_id = param->vf_id;
4481
4482         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4483          * pdev revision 0x20; newer revisions support them. Setting these
4484          * two fields does not cause an error when the driver sends the
4485          * command to the firmware on revision 0x20.
4486          */
4487         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4488                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4489
4490         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4491         if (ret)
4492                 dev_err(&hdev->pdev->dev,
4493                         "Set promisc mode fail, status is %d.\n", ret);
4494
4495         return ret;
4496 }
4497
4498 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4499                               bool en_mc, bool en_bc, int vport_id)
4500 {
4501         if (!param)
4502                 return;
4503
4504         memset(param, 0, sizeof(struct hclge_promisc_param));
4505         if (en_uc)
4506                 param->enable = HCLGE_PROMISC_EN_UC;
4507         if (en_mc)
4508                 param->enable |= HCLGE_PROMISC_EN_MC;
4509         if (en_bc)
4510                 param->enable |= HCLGE_PROMISC_EN_BC;
4511         param->vf_id = vport_id;
4512 }
4513
4514 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4515                                   bool en_mc_pmc)
4516 {
4517         struct hclge_vport *vport = hclge_get_vport(handle);
4518         struct hclge_dev *hdev = vport->back;
4519         struct hclge_promisc_param param;
4520         bool en_bc_pmc = true;
4521
4522         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4523          * is always bypassed. So broadcast promisc should be disabled until
4524          * the user enables promisc mode.
4525          */
4526         if (handle->pdev->revision == 0x20)
4527                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4528
4529         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4530                                  vport->vport_id);
4531         return hclge_cmd_set_promisc_mode(hdev, &param);
4532 }
4533
4534 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4535 {
4536         struct hclge_get_fd_mode_cmd *req;
4537         struct hclge_desc desc;
4538         int ret;
4539
4540         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4541
4542         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4543
4544         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4545         if (ret) {
4546                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4547                 return ret;
4548         }
4549
4550         *fd_mode = req->mode;
4551
4552         return ret;
4553 }
4554
4555 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4556                                    u32 *stage1_entry_num,
4557                                    u32 *stage2_entry_num,
4558                                    u16 *stage1_counter_num,
4559                                    u16 *stage2_counter_num)
4560 {
4561         struct hclge_get_fd_allocation_cmd *req;
4562         struct hclge_desc desc;
4563         int ret;
4564
4565         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4566
4567         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4568
4569         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4570         if (ret) {
4571                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4572                         ret);
4573                 return ret;
4574         }
4575
4576         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4577         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4578         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4579         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4580
4581         return ret;
4582 }
4583
4584 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4585 {
4586         struct hclge_set_fd_key_config_cmd *req;
4587         struct hclge_fd_key_cfg *stage;
4588         struct hclge_desc desc;
4589         int ret;
4590
4591         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4592
4593         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4594         stage = &hdev->fd_cfg.key_cfg[stage_num];
4595         req->stage = stage_num;
4596         req->key_select = stage->key_sel;
4597         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4598         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4599         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4600         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4601         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4602         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4603
4604         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4605         if (ret)
4606                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4607
4608         return ret;
4609 }
4610
4611 static int hclge_init_fd_config(struct hclge_dev *hdev)
4612 {
4613 #define LOW_2_WORDS             0x03
4614         struct hclge_fd_key_cfg *key_cfg;
4615         int ret;
4616
4617         if (!hnae3_dev_fd_supported(hdev))
4618                 return 0;
4619
4620         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4621         if (ret)
4622                 return ret;
4623
4624         switch (hdev->fd_cfg.fd_mode) {
4625         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4626                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4627                 break;
4628         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4629                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4630                 break;
4631         default:
4632                 dev_err(&hdev->pdev->dev,
4633                         "Unsupported flow director mode %d\n",
4634                         hdev->fd_cfg.fd_mode);
4635                 return -EOPNOTSUPP;
4636         }
4637
4638         hdev->fd_cfg.proto_support =
4639                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4640                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4641         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4642         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4643         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4644         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4645         key_cfg->outer_sipv6_word_en = 0;
4646         key_cfg->outer_dipv6_word_en = 0;
4647
4648         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4649                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4650                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4651                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4652
4653         /* If the max 400-bit key is used, we can also support ether type tuples */
4654         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4655                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4656                 key_cfg->tuple_active |=
4657                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4658         }
4659
4660         /* roce_type is used to filter roce frames
4661          * dst_vport is used to specify the destination vport of the rule
4662          */
4663         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4664
4665         ret = hclge_get_fd_allocation(hdev,
4666                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4667                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4668                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4669                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4670         if (ret)
4671                 return ret;
4672
4673         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4674 }
4675
4676 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4677                                 int loc, u8 *key, bool is_add)
4678 {
4679         struct hclge_fd_tcam_config_1_cmd *req1;
4680         struct hclge_fd_tcam_config_2_cmd *req2;
4681         struct hclge_fd_tcam_config_3_cmd *req3;
4682         struct hclge_desc desc[3];
4683         int ret;
4684
4685         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4686         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4687         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4688         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4689         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4690
4691         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4692         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4693         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4694
4695         req1->stage = stage;
4696         req1->xy_sel = sel_x ? 1 : 0;
4697         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4698         req1->index = cpu_to_le32(loc);
4699         req1->entry_vld = sel_x ? is_add : 0;
4700
4701         if (key) {
4702                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4703                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4704                        sizeof(req2->tcam_data));
4705                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4706                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4707         }
4708
4709         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4710         if (ret)
4711                 dev_err(&hdev->pdev->dev,
4712                         "config tcam key fail, ret=%d\n",
4713                         ret);
4714
4715         return ret;
4716 }
4717
4718 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4719                               struct hclge_fd_ad_data *action)
4720 {
4721         struct hclge_fd_ad_config_cmd *req;
4722         struct hclge_desc desc;
4723         u64 ad_data = 0;
4724         int ret;
4725
4726         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4727
4728         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4729         req->index = cpu_to_le32(loc);
4730         req->stage = stage;
4731
4732         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4733                       action->write_rule_id_to_bd);
4734         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4735                         action->rule_id);
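             /* The rule-id fields set above land in the upper 32 bits of
              * ad_data once shifted below; the action fields fill the lower
              * 32 bits.
              */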
4736         ad_data <<= 32;
4737         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4738         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4739                       action->forward_to_direct_queue);
4740         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4741                         action->queue_id);
4742         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4743         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4744                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4745         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4746         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4747                         action->counter_id);
4748
4749         req->ad_data = cpu_to_le64(ad_data);
4750         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4751         if (ret)
4752                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4753
4754         return ret;
4755 }
4756
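     /* Build the TCAM key pair for a single tuple: calc_x()/calc_y() (helpers
      * defined elsewhere in this driver) derive the X/Y key encoding from a
      * tuple value and its mask; tuples flagged as unused are simply skipped.
      */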
4757 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4758                                    struct hclge_fd_rule *rule)
4759 {
4760         u16 tmp_x_s, tmp_y_s;
4761         u32 tmp_x_l, tmp_y_l;
4762         int i;
4763
4764         if (rule->unused_tuple & tuple_bit)
4765                 return true;
4766
4767         switch (tuple_bit) {
4768         case 0:
4769                 return false;
4770         case BIT(INNER_DST_MAC):
4771                 for (i = 0; i < ETH_ALEN; i++) {
4772                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4773                                rule->tuples_mask.dst_mac[i]);
4774                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4775                                rule->tuples_mask.dst_mac[i]);
4776                 }
4777
4778                 return true;
4779         case BIT(INNER_SRC_MAC):
4780                 for (i = 0; i < ETH_ALEN; i++) {
4781                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4782                                rule->tuples_mask.src_mac[i]);
4783                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4784                                rule->tuples_mask.src_mac[i]);
4785                 }
4786
4787                 return true;
4788         case BIT(INNER_VLAN_TAG_FST):
4789                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4790                        rule->tuples_mask.vlan_tag1);
4791                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4792                        rule->tuples_mask.vlan_tag1);
4793                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4794                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4795
4796                 return true;
4797         case BIT(INNER_ETH_TYPE):
4798                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4799                        rule->tuples_mask.ether_proto);
4800                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4801                        rule->tuples_mask.ether_proto);
4802                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4803                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4804
4805                 return true;
4806         case BIT(INNER_IP_TOS):
4807                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4808                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4809
4810                 return true;
4811         case BIT(INNER_IP_PROTO):
4812                 calc_x(*key_x, rule->tuples.ip_proto,
4813                        rule->tuples_mask.ip_proto);
4814                 calc_y(*key_y, rule->tuples.ip_proto,
4815                        rule->tuples_mask.ip_proto);
4816
4817                 return true;
4818         case BIT(INNER_SRC_IP):
4819                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4820                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4821                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4822                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4823                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4824                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4825
4826                 return true;
4827         case BIT(INNER_DST_IP):
4828                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4829                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4830                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4831                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4832                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4833                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4834
4835                 return true;
4836         case BIT(INNER_SRC_PORT):
4837                 calc_x(tmp_x_s, rule->tuples.src_port,
4838                        rule->tuples_mask.src_port);
4839                 calc_y(tmp_y_s, rule->tuples.src_port,
4840                        rule->tuples_mask.src_port);
4841                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4842                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4843
4844                 return true;
4845         case BIT(INNER_DST_PORT):
4846                 calc_x(tmp_x_s, rule->tuples.dst_port,
4847                        rule->tuples_mask.dst_port);
4848                 calc_y(tmp_y_s, rule->tuples.dst_port,
4849                        rule->tuples_mask.dst_port);
4850                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4851                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4852
4853                 return true;
4854         default:
4855                 return false;
4856         }
4857 }
4858
4859 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4860                                  u8 vf_id, u8 network_port_id)
4861 {
4862         u32 port_number = 0;
4863
4864         if (port_type == HOST_PORT) {
4865                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4866                                 pf_id);
4867                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4868                                 vf_id);
4869                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4870         } else {
4871                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4872                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4873                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4874         }
4875
4876         return port_number;
4877 }
4878
4879 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4880                                        __le32 *key_x, __le32 *key_y,
4881                                        struct hclge_fd_rule *rule)
4882 {
4883         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4884         u8 cur_pos = 0, tuple_size, shift_bits;
4885         unsigned int i;
4886
4887         for (i = 0; i < MAX_META_DATA; i++) {
4888                 tuple_size = meta_data_key_info[i].key_length;
4889                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4890
4891                 switch (tuple_bit) {
4892                 case BIT(ROCE_TYPE):
4893                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4894                         cur_pos += tuple_size;
4895                         break;
4896                 case BIT(DST_VPORT):
4897                         port_number = hclge_get_port_number(HOST_PORT, 0,
4898                                                             rule->vf_id, 0);
4899                         hnae3_set_field(meta_data,
4900                                         GENMASK(cur_pos + tuple_size, cur_pos),
4901                                         cur_pos, port_number);
4902                         cur_pos += tuple_size;
4903                         break;
4904                 default:
4905                         break;
4906                 }
4907         }
4908
4909         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4910         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4911         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4912
4913         *key_x = cpu_to_le32(tmp_x << shift_bits);
4914         *key_y = cpu_to_le32(tmp_y << shift_bits);
4915 }
4916
4917 /* A complete key is combined with meta data key and tuple key.
4918  * Meta data key is stored at the MSB region, and tuple key is stored at
4919  * the LSB region, unused bits will be filled 0.
4920  */
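     /* A concrete reading of the code below (illustrative, not a hardware
      * spec): tuple fields are packed into key_x/key_y from byte 0 upward,
      * and the meta data words are written starting at byte offset
      * max_key_length / 8 - MAX_META_DATA_LENGTH / 8.
      */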
4921 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4922                             struct hclge_fd_rule *rule)
4923 {
4924         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4925         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4926         u8 *cur_key_x, *cur_key_y;
4927         unsigned int i;
4928         int ret, tuple_size;
4929         u8 meta_data_region;
4930
4931         memset(key_x, 0, sizeof(key_x));
4932         memset(key_y, 0, sizeof(key_y));
4933         cur_key_x = key_x;
4934         cur_key_y = key_y;
4935
4936         for (i = 0; i < MAX_TUPLE; i++) {
4937                 bool tuple_valid;
4938                 u32 check_tuple;
4939
4940                 tuple_size = tuple_key_info[i].key_length / 8;
4941                 check_tuple = key_cfg->tuple_active & BIT(i);
4942
4943                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4944                                                      cur_key_y, rule);
4945                 if (tuple_valid) {
4946                         cur_key_x += tuple_size;
4947                         cur_key_y += tuple_size;
4948                 }
4949         }
4950
4951         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4952                         MAX_META_DATA_LENGTH / 8;
4953
4954         hclge_fd_convert_meta_data(key_cfg,
4955                                    (__le32 *)(key_x + meta_data_region),
4956                                    (__le32 *)(key_y + meta_data_region),
4957                                    rule);
4958
4959         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4960                                    true);
4961         if (ret) {
4962                 dev_err(&hdev->pdev->dev,
4963                         "fd key_y config fail, loc=%d, ret=%d\n",
4964                         rule->location, ret);
4965                 return ret;
4966         }
4967
4968         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4969                                    true);
4970         if (ret)
4971                 dev_err(&hdev->pdev->dev,
4972                         "fd key_x config fail, loc=%d, ret=%d\n",
4973                         rule->location, ret);
4974         return ret;
4975 }
4976
4977 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4978                                struct hclge_fd_rule *rule)
4979 {
4980         struct hclge_fd_ad_data ad_data;
4981
4982         ad_data.ad_id = rule->location;
4983
4984         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4985                 ad_data.drop_packet = true;
4986                 ad_data.forward_to_direct_queue = false;
4987                 ad_data.queue_id = 0;
4988         } else {
4989                 ad_data.drop_packet = false;
4990                 ad_data.forward_to_direct_queue = true;
4991                 ad_data.queue_id = rule->queue_id;
4992         }
4993
4994         ad_data.use_counter = false;
4995         ad_data.counter_id = 0;
4996
4997         ad_data.use_next_stage = false;
4998         ad_data.next_input_key = 0;
4999
5000         ad_data.write_rule_id_to_bd = true;
5001         ad_data.rule_id = rule->location;
5002
5003         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5004 }
5005
5006 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5007                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5008 {
5009         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5010         struct ethtool_usrip4_spec *usr_ip4_spec;
5011         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5012         struct ethtool_usrip6_spec *usr_ip6_spec;
5013         struct ethhdr *ether_spec;
5014
5015         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5016                 return -EINVAL;
5017
5018         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5019                 return -EOPNOTSUPP;
5020
5021         if ((fs->flow_type & FLOW_EXT) &&
5022             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5023                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5024                 return -EOPNOTSUPP;
5025         }
5026
5027         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5028         case SCTP_V4_FLOW:
5029         case TCP_V4_FLOW:
5030         case UDP_V4_FLOW:
5031                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5032                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5033
5034                 if (!tcp_ip4_spec->ip4src)
5035                         *unused |= BIT(INNER_SRC_IP);
5036
5037                 if (!tcp_ip4_spec->ip4dst)
5038                         *unused |= BIT(INNER_DST_IP);
5039
5040                 if (!tcp_ip4_spec->psrc)
5041                         *unused |= BIT(INNER_SRC_PORT);
5042
5043                 if (!tcp_ip4_spec->pdst)
5044                         *unused |= BIT(INNER_DST_PORT);
5045
5046                 if (!tcp_ip4_spec->tos)
5047                         *unused |= BIT(INNER_IP_TOS);
5048
5049                 break;
5050         case IP_USER_FLOW:
5051                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5052                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5053                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5054
5055                 if (!usr_ip4_spec->ip4src)
5056                         *unused |= BIT(INNER_SRC_IP);
5057
5058                 if (!usr_ip4_spec->ip4dst)
5059                         *unused |= BIT(INNER_DST_IP);
5060
5061                 if (!usr_ip4_spec->tos)
5062                         *unused |= BIT(INNER_IP_TOS);
5063
5064                 if (!usr_ip4_spec->proto)
5065                         *unused |= BIT(INNER_IP_PROTO);
5066
5067                 if (usr_ip4_spec->l4_4_bytes)
5068                         return -EOPNOTSUPP;
5069
5070                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5071                         return -EOPNOTSUPP;
5072
5073                 break;
5074         case SCTP_V6_FLOW:
5075         case TCP_V6_FLOW:
5076         case UDP_V6_FLOW:
5077                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5078                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5079                         BIT(INNER_IP_TOS);
5080
5081                 /* check whether the src/dst ip address is used */
5082                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5083                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5084                         *unused |= BIT(INNER_SRC_IP);
5085
5086                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5087                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5088                         *unused |= BIT(INNER_DST_IP);
5089
5090                 if (!tcp_ip6_spec->psrc)
5091                         *unused |= BIT(INNER_SRC_PORT);
5092
5093                 if (!tcp_ip6_spec->pdst)
5094                         *unused |= BIT(INNER_DST_PORT);
5095
5096                 if (tcp_ip6_spec->tclass)
5097                         return -EOPNOTSUPP;
5098
5099                 break;
5100         case IPV6_USER_FLOW:
5101                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5102                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5103                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5104                         BIT(INNER_DST_PORT);
5105
5106                 /* check whether the src/dst ip address is used */
5107                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5108                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5109                         *unused |= BIT(INNER_SRC_IP);
5110
5111                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5112                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5113                         *unused |= BIT(INNER_DST_IP);
5114
5115                 if (!usr_ip6_spec->l4_proto)
5116                         *unused |= BIT(INNER_IP_PROTO);
5117
5118                 if (usr_ip6_spec->tclass)
5119                         return -EOPNOTSUPP;
5120
5121                 if (usr_ip6_spec->l4_4_bytes)
5122                         return -EOPNOTSUPP;
5123
5124                 break;
5125         case ETHER_FLOW:
5126                 ether_spec = &fs->h_u.ether_spec;
5127                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5128                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5129                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5130
5131                 if (is_zero_ether_addr(ether_spec->h_source))
5132                         *unused |= BIT(INNER_SRC_MAC);
5133
5134                 if (is_zero_ether_addr(ether_spec->h_dest))
5135                         *unused |= BIT(INNER_DST_MAC);
5136
5137                 if (!ether_spec->h_proto)
5138                         *unused |= BIT(INNER_ETH_TYPE);
5139
5140                 break;
5141         default:
5142                 return -EOPNOTSUPP;
5143         }
5144
5145         if ((fs->flow_type & FLOW_EXT)) {
5146                 if (fs->h_ext.vlan_etype)
5147                         return -EOPNOTSUPP;
5148                 if (!fs->h_ext.vlan_tci)
5149                         *unused |= BIT(INNER_VLAN_TAG_FST);
5150
5151                 if (fs->m_ext.vlan_tci) {
5152                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5153                                 return -EINVAL;
5154                 }
5155         } else {
5156                 *unused |= BIT(INNER_VLAN_TAG_FST);
5157         }
5158
5159         if (fs->flow_type & FLOW_MAC_EXT) {
5160                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5161                         return -EOPNOTSUPP;
5162
5163                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5164                         *unused |= BIT(INNER_DST_MAC);
5165                 else
5166                         *unused &= ~(BIT(INNER_DST_MAC));
5167         }
5168
5169         return 0;
5170 }
5171
5172 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5173 {
5174         struct hclge_fd_rule *rule = NULL;
5175         struct hlist_node *node2;
5176
5177         spin_lock_bh(&hdev->fd_rule_lock);
5178         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5179                 if (rule->location >= location)
5180                         break;
5181         }
5182
5183         spin_unlock_bh(&hdev->fd_rule_lock);
5184
5185         return rule && rule->location == location;
5186 }
5187
5188 /* must be called with fd_rule_lock held */
5189 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5190                                      struct hclge_fd_rule *new_rule,
5191                                      u16 location,
5192                                      bool is_add)
5193 {
5194         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5195         struct hlist_node *node2;
5196
5197         if (is_add && !new_rule)
5198                 return -EINVAL;
5199
5200         hlist_for_each_entry_safe(rule, node2,
5201                                   &hdev->fd_rule_list, rule_node) {
5202                 if (rule->location >= location)
5203                         break;
5204                 parent = rule;
5205         }
5206
5207         if (rule && rule->location == location) {
5208                 hlist_del(&rule->rule_node);
5209                 kfree(rule);
5210                 hdev->hclge_fd_rule_num--;
5211
5212                 if (!is_add) {
5213                         if (!hdev->hclge_fd_rule_num)
5214                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5215                         clear_bit(location, hdev->fd_bmap);
5216
5217                         return 0;
5218                 }
5219         } else if (!is_add) {
5220                 dev_err(&hdev->pdev->dev,
5221                         "delete fail, rule %d does not exist\n",
5222                         location);
5223                 return -EINVAL;
5224         }
5225
5226         INIT_HLIST_NODE(&new_rule->rule_node);
5227
5228         if (parent)
5229                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5230         else
5231                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5232
5233         set_bit(location, hdev->fd_bmap);
5234         hdev->hclge_fd_rule_num++;
5235         hdev->fd_active_type = new_rule->rule_type;
5236
5237         return 0;
5238 }
5239
5240 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5241                               struct ethtool_rx_flow_spec *fs,
5242                               struct hclge_fd_rule *rule)
5243 {
5244         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5245
5246         switch (flow_type) {
5247         case SCTP_V4_FLOW:
5248         case TCP_V4_FLOW:
5249         case UDP_V4_FLOW:
5250                 rule->tuples.src_ip[IPV4_INDEX] =
5251                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5252                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5253                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5254
5255                 rule->tuples.dst_ip[IPV4_INDEX] =
5256                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5257                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5258                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5259
5260                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5261                 rule->tuples_mask.src_port =
5262                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5263
5264                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5265                 rule->tuples_mask.dst_port =
5266                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5267
5268                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5269                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5270
5271                 rule->tuples.ether_proto = ETH_P_IP;
5272                 rule->tuples_mask.ether_proto = 0xFFFF;
5273
5274                 break;
5275         case IP_USER_FLOW:
5276                 rule->tuples.src_ip[IPV4_INDEX] =
5277                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5278                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5279                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5280
5281                 rule->tuples.dst_ip[IPV4_INDEX] =
5282                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5283                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5284                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5285
5286                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5287                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5288
5289                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5290                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5291
5292                 rule->tuples.ether_proto = ETH_P_IP;
5293                 rule->tuples_mask.ether_proto = 0xFFFF;
5294
5295                 break;
5296         case SCTP_V6_FLOW:
5297         case TCP_V6_FLOW:
5298         case UDP_V6_FLOW:
5299                 be32_to_cpu_array(rule->tuples.src_ip,
5300                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5301                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5302                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5303
5304                 be32_to_cpu_array(rule->tuples.dst_ip,
5305                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5306                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5307                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5308
5309                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5310                 rule->tuples_mask.src_port =
5311                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5312
5313                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5314                 rule->tuples_mask.dst_port =
5315                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5316
5317                 rule->tuples.ether_proto = ETH_P_IPV6;
5318                 rule->tuples_mask.ether_proto = 0xFFFF;
5319
5320                 break;
5321         case IPV6_USER_FLOW:
5322                 be32_to_cpu_array(rule->tuples.src_ip,
5323                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5324                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5325                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5326
5327                 be32_to_cpu_array(rule->tuples.dst_ip,
5328                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5329                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5330                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5331
5332                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5333                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5334
5335                 rule->tuples.ether_proto = ETH_P_IPV6;
5336                 rule->tuples_mask.ether_proto = 0xFFFF;
5337
5338                 break;
5339         case ETHER_FLOW:
5340                 ether_addr_copy(rule->tuples.src_mac,
5341                                 fs->h_u.ether_spec.h_source);
5342                 ether_addr_copy(rule->tuples_mask.src_mac,
5343                                 fs->m_u.ether_spec.h_source);
5344
5345                 ether_addr_copy(rule->tuples.dst_mac,
5346                                 fs->h_u.ether_spec.h_dest);
5347                 ether_addr_copy(rule->tuples_mask.dst_mac,
5348                                 fs->m_u.ether_spec.h_dest);
5349
5350                 rule->tuples.ether_proto =
5351                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5352                 rule->tuples_mask.ether_proto =
5353                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5354
5355                 break;
5356         default:
5357                 return -EOPNOTSUPP;
5358         }
5359
5360         switch (flow_type) {
5361         case SCTP_V4_FLOW:
5362         case SCTP_V6_FLOW:
5363                 rule->tuples.ip_proto = IPPROTO_SCTP;
5364                 rule->tuples_mask.ip_proto = 0xFF;
5365                 break;
5366         case TCP_V4_FLOW:
5367         case TCP_V6_FLOW:
5368                 rule->tuples.ip_proto = IPPROTO_TCP;
5369                 rule->tuples_mask.ip_proto = 0xFF;
5370                 break;
5371         case UDP_V4_FLOW:
5372         case UDP_V6_FLOW:
5373                 rule->tuples.ip_proto = IPPROTO_UDP;
5374                 rule->tuples_mask.ip_proto = 0xFF;
5375                 break;
5376         default:
5377                 break;
5378         }
5379
5380         if ((fs->flow_type & FLOW_EXT)) {
5381                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5382                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5383         }
5384
5385         if (fs->flow_type & FLOW_MAC_EXT) {
5386                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5387                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5388         }
5389
5390         return 0;
5391 }
5392
5393 /* must be called with fd_rule_lock held */
5394 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5395                                 struct hclge_fd_rule *rule)
5396 {
5397         int ret;
5398
5399         if (!rule) {
5400                 dev_err(&hdev->pdev->dev,
5401                         "The flow director rule is NULL\n");
5402                 return -EINVAL;
5403         }
5404
5405         /* this never fails here, so there is no need to check the return value */
5406         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5407
5408         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5409         if (ret)
5410                 goto clear_rule;
5411
5412         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5413         if (ret)
5414                 goto clear_rule;
5415
5416         return 0;
5417
5418 clear_rule:
5419         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5420         return ret;
5421 }
5422
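     /* Add a flow-director rule requested through the ethtool interface:
      * validate the spec, resolve the destination vport and queue, build the
      * rule tuples, clear any aRFS rules to avoid conflicts, then program the
      * rule under fd_rule_lock.
      */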
5423 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5424                               struct ethtool_rxnfc *cmd)
5425 {
5426         struct hclge_vport *vport = hclge_get_vport(handle);
5427         struct hclge_dev *hdev = vport->back;
5428         u16 dst_vport_id = 0, q_index = 0;
5429         struct ethtool_rx_flow_spec *fs;
5430         struct hclge_fd_rule *rule;
5431         u32 unused = 0;
5432         u8 action;
5433         int ret;
5434
5435         if (!hnae3_dev_fd_supported(hdev))
5436                 return -EOPNOTSUPP;
5437
5438         if (!hdev->fd_en) {
5439                 dev_warn(&hdev->pdev->dev,
5440                          "Please enable flow director first\n");
5441                 return -EOPNOTSUPP;
5442         }
5443
5444         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5445
5446         ret = hclge_fd_check_spec(hdev, fs, &unused);
5447         if (ret) {
5448                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5449                 return ret;
5450         }
5451
5452         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5453                 action = HCLGE_FD_ACTION_DROP_PACKET;
5454         } else {
5455                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5456                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5457                 u16 tqps;
5458
5459                 if (vf > hdev->num_req_vfs) {
5460                         dev_err(&hdev->pdev->dev,
5461                                 "Error: vf id (%d) > max vf num (%d)\n",
5462                                 vf, hdev->num_req_vfs);
5463                         return -EINVAL;
5464                 }
5465
5466                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5467                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5468
5469                 if (ring >= tqps) {
5470                         dev_err(&hdev->pdev->dev,
5471                                 "Error: queue id (%d) > max tqp num (%d)\n",
5472                                 ring, tqps - 1);
5473                         return -EINVAL;
5474                 }
5475
5476                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5477                 q_index = ring;
5478         }
5479
5480         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5481         if (!rule)
5482                 return -ENOMEM;
5483
5484         ret = hclge_fd_get_tuple(hdev, fs, rule);
5485         if (ret) {
5486                 kfree(rule);
5487                 return ret;
5488         }
5489
5490         rule->flow_type = fs->flow_type;
5491
5492         rule->location = fs->location;
5493         rule->unused_tuple = unused;
5494         rule->vf_id = dst_vport_id;
5495         rule->queue_id = q_index;
5496         rule->action = action;
5497         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5498
5499         /* to avoid rule conflicts, clear all arfs rules when the user
5500          * configures a rule via ethtool
5501          */
5502         hclge_clear_arfs_rules(handle);
5503
5504         spin_lock_bh(&hdev->fd_rule_lock);
5505         ret = hclge_fd_config_rule(hdev, rule);
5506
5507         spin_unlock_bh(&hdev->fd_rule_lock);
5508
5509         return ret;
5510 }
5511
5512 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5513                               struct ethtool_rxnfc *cmd)
5514 {
5515         struct hclge_vport *vport = hclge_get_vport(handle);
5516         struct hclge_dev *hdev = vport->back;
5517         struct ethtool_rx_flow_spec *fs;
5518         int ret;
5519
5520         if (!hnae3_dev_fd_supported(hdev))
5521                 return -EOPNOTSUPP;
5522
5523         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5524
5525         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5526                 return -EINVAL;
5527
5528         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5529                 dev_err(&hdev->pdev->dev,
5530                         "Delete fail, rule %d does not exist\n", fs->location);
5531                 return -ENOENT;
5532         }
5533
5534         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5535                                    NULL, false);
5536         if (ret)
5537                 return ret;
5538
5539         spin_lock_bh(&hdev->fd_rule_lock);
5540         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5541
5542         spin_unlock_bh(&hdev->fd_rule_lock);
5543
5544         return ret;
5545 }
5546
5547 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5548                                      bool clear_list)
5549 {
5550         struct hclge_vport *vport = hclge_get_vport(handle);
5551         struct hclge_dev *hdev = vport->back;
5552         struct hclge_fd_rule *rule;
5553         struct hlist_node *node;
5554         u16 location;
5555
5556         if (!hnae3_dev_fd_supported(hdev))
5557                 return;
5558
5559         spin_lock_bh(&hdev->fd_rule_lock);
5560         for_each_set_bit(location, hdev->fd_bmap,
5561                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5562                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5563                                      NULL, false);
5564
5565         if (clear_list) {
5566                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5567                                           rule_node) {
5568                         hlist_del(&rule->rule_node);
5569                         kfree(rule);
5570                 }
5571                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5572                 hdev->hclge_fd_rule_num = 0;
5573                 bitmap_zero(hdev->fd_bmap,
5574                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5575         }
5576
5577         spin_unlock_bh(&hdev->fd_rule_lock);
5578 }
5579
5580 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5581 {
5582         struct hclge_vport *vport = hclge_get_vport(handle);
5583         struct hclge_dev *hdev = vport->back;
5584         struct hclge_fd_rule *rule;
5585         struct hlist_node *node;
5586         int ret;
5587
5588         /* Return 0 here, because the reset error handling will check this
5589          * return value. If an error is returned here, the reset process will
5590          * fail.
5591          */
5592         if (!hnae3_dev_fd_supported(hdev))
5593                 return 0;
5594
5595         /* if fd is disabled, the rules should not be restored during reset */
5596         if (!hdev->fd_en)
5597                 return 0;
5598
5599         spin_lock_bh(&hdev->fd_rule_lock);
5600         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5601                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5602                 if (!ret)
5603                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5604
5605                 if (ret) {
5606                         dev_warn(&hdev->pdev->dev,
5607                                  "Restore rule %d failed, remove it\n",
5608                                  rule->location);
5609                         clear_bit(rule->location, hdev->fd_bmap);
5610                         hlist_del(&rule->rule_node);
5611                         kfree(rule);
5612                         hdev->hclge_fd_rule_num--;
5613                 }
5614         }
5615
5616         if (hdev->hclge_fd_rule_num)
5617                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5618
5619         spin_unlock_bh(&hdev->fd_rule_lock);
5620
5621         return 0;
5622 }
5623
5624 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5625                                  struct ethtool_rxnfc *cmd)
5626 {
5627         struct hclge_vport *vport = hclge_get_vport(handle);
5628         struct hclge_dev *hdev = vport->back;
5629
5630         if (!hnae3_dev_fd_supported(hdev))
5631                 return -EOPNOTSUPP;
5632
5633         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5634         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5635
5636         return 0;
5637 }
5638
5639 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5640                                   struct ethtool_rxnfc *cmd)
5641 {
5642         struct hclge_vport *vport = hclge_get_vport(handle);
5643         struct hclge_fd_rule *rule = NULL;
5644         struct hclge_dev *hdev = vport->back;
5645         struct ethtool_rx_flow_spec *fs;
5646         struct hlist_node *node2;
5647
5648         if (!hnae3_dev_fd_supported(hdev))
5649                 return -EOPNOTSUPP;
5650
5651         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5652
5653         spin_lock_bh(&hdev->fd_rule_lock);
5654
5655         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5656                 if (rule->location >= fs->location)
5657                         break;
5658         }
5659
5660         if (!rule || fs->location != rule->location) {
5661                 spin_unlock_bh(&hdev->fd_rule_lock);
5662
5663                 return -ENOENT;
5664         }
5665
5666         fs->flow_type = rule->flow_type;
5667         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5668         case SCTP_V4_FLOW:
5669         case TCP_V4_FLOW:
5670         case UDP_V4_FLOW:
5671                 fs->h_u.tcp_ip4_spec.ip4src =
5672                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5673                 fs->m_u.tcp_ip4_spec.ip4src =
5674                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5675                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5676
5677                 fs->h_u.tcp_ip4_spec.ip4dst =
5678                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5679                 fs->m_u.tcp_ip4_spec.ip4dst =
5680                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5681                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5682
5683                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5684                 fs->m_u.tcp_ip4_spec.psrc =
5685                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5686                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5687
5688                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5689                 fs->m_u.tcp_ip4_spec.pdst =
5690                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5691                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5692
5693                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5694                 fs->m_u.tcp_ip4_spec.tos =
5695                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5696                                 0 : rule->tuples_mask.ip_tos;
5697
5698                 break;
5699         case IP_USER_FLOW:
5700                 fs->h_u.usr_ip4_spec.ip4src =
5701                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5702                 fs->m_u.tcp_ip4_spec.ip4src =
5703                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5704                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5705
5706                 fs->h_u.usr_ip4_spec.ip4dst =
5707                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5708                 fs->m_u.usr_ip4_spec.ip4dst =
5709                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5710                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5711
5712                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5713                 fs->m_u.usr_ip4_spec.tos =
5714                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5715                                 0 : rule->tuples_mask.ip_tos;
5716
5717                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5718                 fs->m_u.usr_ip4_spec.proto =
5719                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5720                                 0 : rule->tuples_mask.ip_proto;
5721
5722                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5723
5724                 break;
5725         case SCTP_V6_FLOW:
5726         case TCP_V6_FLOW:
5727         case UDP_V6_FLOW:
5728                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5729                                   rule->tuples.src_ip, IPV6_SIZE);
5730                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5731                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5732                                sizeof(int) * IPV6_SIZE);
5733                 else
5734                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5735                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5736
5737                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5738                                   rule->tuples.dst_ip, IPV6_SIZE);
5739                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5740                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5741                                sizeof(int) * IPV6_SIZE);
5742                 else
5743                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5744                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5745
5746                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5747                 fs->m_u.tcp_ip6_spec.psrc =
5748                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5749                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5750
5751                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5752                 fs->m_u.tcp_ip6_spec.pdst =
5753                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5754                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5755
5756                 break;
5757         case IPV6_USER_FLOW:
5758                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5759                                   rule->tuples.src_ip, IPV6_SIZE);
5760                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5761                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5762                                sizeof(int) * IPV6_SIZE);
5763                 else
5764                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5765                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5766
5767                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5768                                   rule->tuples.dst_ip, IPV6_SIZE);
5769                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5770                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5771                                sizeof(int) * IPV6_SIZE);
5772                 else
5773                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5774                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5775
5776                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5777                 fs->m_u.usr_ip6_spec.l4_proto =
5778                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5779                                 0 : rule->tuples_mask.ip_proto;
5780
5781                 break;
5782         case ETHER_FLOW:
5783                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5784                                 rule->tuples.src_mac);
5785                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5786                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5787                 else
5788                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5789                                         rule->tuples_mask.src_mac);
5790
5791                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5792                                 rule->tuples.dst_mac);
5793                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5794                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5795                 else
5796                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5797                                         rule->tuples_mask.dst_mac);
5798
5799                 fs->h_u.ether_spec.h_proto =
5800                                 cpu_to_be16(rule->tuples.ether_proto);
5801                 fs->m_u.ether_spec.h_proto =
5802                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5803                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5804
5805                 break;
5806         default:
5807                 spin_unlock_bh(&hdev->fd_rule_lock);
5808                 return -EOPNOTSUPP;
5809         }
5810
5811         if (fs->flow_type & FLOW_EXT) {
5812                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5813                 fs->m_ext.vlan_tci =
5814                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5815                                 cpu_to_be16(VLAN_VID_MASK) :
5816                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5817         }
5818
5819         if (fs->flow_type & FLOW_MAC_EXT) {
5820                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5821                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5822                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5823                 else
5824                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5825                                         rule->tuples_mask.dst_mac);
5826         }
5827
5828         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5829                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5830         } else {
5831                 u64 vf_id;
5832
5833                 fs->ring_cookie = rule->queue_id;
5834                 vf_id = rule->vf_id;
5835                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5836                 fs->ring_cookie |= vf_id;
5837         }
5838
5839         spin_unlock_bh(&hdev->fd_rule_lock);
5840
5841         return 0;
5842 }
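/* A note on the ring_cookie layout used above (a sketch with hypothetical
 * values, not part of the driver): the low half of the 64-bit cookie holds
 * the destination queue and the VF id sits at
 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF, which is how hclge_add_fd_entry()
 * decodes what user space passed in.  For example, vf_id = 2 and queue 5
 * would be reported as:
 *
 *     u64 cookie = 5 | ((u64)2 << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF);
 *
 * The ethtool uapi helpers ethtool_get_flow_spec_ring() and
 * ethtool_get_flow_spec_ring_vf() recover the two fields again.
 */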
5843
5844 static int hclge_get_all_rules(struct hnae3_handle *handle,
5845                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5846 {
5847         struct hclge_vport *vport = hclge_get_vport(handle);
5848         struct hclge_dev *hdev = vport->back;
5849         struct hclge_fd_rule *rule;
5850         struct hlist_node *node2;
5851         int cnt = 0;
5852
5853         if (!hnae3_dev_fd_supported(hdev))
5854                 return -EOPNOTSUPP;
5855
5856         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5857
5858         spin_lock_bh(&hdev->fd_rule_lock);
5859         hlist_for_each_entry_safe(rule, node2,
5860                                   &hdev->fd_rule_list, rule_node) {
5861                 if (cnt == cmd->rule_cnt) {
5862                         spin_unlock_bh(&hdev->fd_rule_lock);
5863                         return -EMSGSIZE;
5864                 }
5865
5866                 rule_locs[cnt] = rule->location;
5867                 cnt++;
5868         }
5869
5870         spin_unlock_bh(&hdev->fd_rule_lock);
5871
5872         cmd->rule_cnt = cnt;
5873
5874         return 0;
5875 }
5876
5877 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5878                                      struct hclge_fd_rule_tuples *tuples)
5879 {
5880         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5881         tuples->ip_proto = fkeys->basic.ip_proto;
5882         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5883
5884         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5885                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5886                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5887         } else {
5888                 memcpy(tuples->src_ip,
5889                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5890                        sizeof(tuples->src_ip));
5891                 memcpy(tuples->dst_ip,
5892                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5893                        sizeof(tuples->dst_ip));
5894         }
5895 }
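/* A small illustration of the tuple layout filled in above (hypothetical
 * address, not part of the driver): src_ip/dst_ip are sized for IPv6, so an
 * IPv4 address occupies only the last 32-bit word (index 3, matching the
 * IPV4_INDEX used elsewhere in this file) while the other words stay zero:
 *
 *     tuples->src_ip[3] = 0xc0000201;   // 192.0.2.1 in host byte order
 *     tuples->src_ip[0] = tuples->src_ip[1] = tuples->src_ip[2] = 0;
 */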
5896
5897 /* traverse all rules, check whether an existing rule has the same tuples */
5898 static struct hclge_fd_rule *
5899 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5900                           const struct hclge_fd_rule_tuples *tuples)
5901 {
5902         struct hclge_fd_rule *rule = NULL;
5903         struct hlist_node *node;
5904
5905         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5906                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5907                         return rule;
5908         }
5909
5910         return NULL;
5911 }
5912
5913 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5914                                      struct hclge_fd_rule *rule)
5915 {
5916         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5917                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5918                              BIT(INNER_SRC_PORT);
5919         rule->action = 0;
5920         rule->vf_id = 0;
5921         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5922         if (tuples->ether_proto == ETH_P_IP) {
5923                 if (tuples->ip_proto == IPPROTO_TCP)
5924                         rule->flow_type = TCP_V4_FLOW;
5925                 else
5926                         rule->flow_type = UDP_V4_FLOW;
5927         } else {
5928                 if (tuples->ip_proto == IPPROTO_TCP)
5929                         rule->flow_type = TCP_V6_FLOW;
5930                 else
5931                         rule->flow_type = UDP_V6_FLOW;
5932         }
5933         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5934         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5935 }
5936
5937 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5938                                       u16 flow_id, struct flow_keys *fkeys)
5939 {
5940         struct hclge_vport *vport = hclge_get_vport(handle);
5941         struct hclge_fd_rule_tuples new_tuples;
5942         struct hclge_dev *hdev = vport->back;
5943         struct hclge_fd_rule *rule;
5944         u16 tmp_queue_id;
5945         u16 bit_id;
5946         int ret;
5947
5948         if (!hnae3_dev_fd_supported(hdev))
5949                 return -EOPNOTSUPP;
5950
5951         memset(&new_tuples, 0, sizeof(new_tuples));
5952         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5953
5954         spin_lock_bh(&hdev->fd_rule_lock);
5955
5956         /* when there are already fd rules added by the user,
5957          * arfs should not work
5958          */
5959         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5960                 spin_unlock_bh(&hdev->fd_rule_lock);
5961
5962                 return -EOPNOTSUPP;
5963         }
5964
5965         /* check whether a flow director filter already exists for this flow:
5966          * if not, create a new filter for it;
5967          * if a filter exists with a different queue id, modify the filter;
5968          * if a filter exists with the same queue id, do nothing
5969          */
5970         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5971         if (!rule) {
5972                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5973                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5974                         spin_unlock_bh(&hdev->fd_rule_lock);
5975
5976                         return -ENOSPC;
5977                 }
5978
5979                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
5980                 if (!rule) {
5981                         spin_unlock_bh(&hdev->fd_rule_lock);
5982
5983                         return -ENOMEM;
5984                 }
5985
5986                 set_bit(bit_id, hdev->fd_bmap);
5987                 rule->location = bit_id;
5988                 rule->flow_id = flow_id;
5989                 rule->queue_id = queue_id;
5990                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5991                 ret = hclge_fd_config_rule(hdev, rule);
5992
5993                 spin_unlock_bh(&hdev->fd_rule_lock);
5994
5995                 if (ret)
5996                         return ret;
5997
5998                 return rule->location;
5999         }
6000
6001         spin_unlock_bh(&hdev->fd_rule_lock);
6002
6003         if (rule->queue_id == queue_id)
6004                 return rule->location;
6005
6006         tmp_queue_id = rule->queue_id;
6007         rule->queue_id = queue_id;
6008         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6009         if (ret) {
6010                 rule->queue_id = tmp_queue_id;
6011                 return ret;
6012         }
6013
6014         return rule->location;
6015 }
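/* Note on the aRFS contract above: on success the function returns
 * rule->location, which the stack keeps as the filter id and later hands
 * back via rps_may_expire_flow() in hclge_rfs_filter_expire() below; a
 * negative errno (-EOPNOTSUPP, -ENOSPC, -ENOMEM, ...) reports failure.
 */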
6016
6017 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6018 {
6019 #ifdef CONFIG_RFS_ACCEL
6020         struct hnae3_handle *handle = &hdev->vport[0].nic;
6021         struct hclge_fd_rule *rule;
6022         struct hlist_node *node;
6023         HLIST_HEAD(del_list);
6024
6025         spin_lock_bh(&hdev->fd_rule_lock);
6026         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6027                 spin_unlock_bh(&hdev->fd_rule_lock);
6028                 return;
6029         }
6030         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6031                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6032                                         rule->flow_id, rule->location)) {
6033                         hlist_del_init(&rule->rule_node);
6034                         hlist_add_head(&rule->rule_node, &del_list);
6035                         hdev->hclge_fd_rule_num--;
6036                         clear_bit(rule->location, hdev->fd_bmap);
6037                 }
6038         }
6039         spin_unlock_bh(&hdev->fd_rule_lock);
6040
6041         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6042                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6043                                      rule->location, NULL, false);
6044                 kfree(rule);
6045         }
6046 #endif
6047 }
6048
6049 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6050 {
6051 #ifdef CONFIG_RFS_ACCEL
6052         struct hclge_vport *vport = hclge_get_vport(handle);
6053         struct hclge_dev *hdev = vport->back;
6054
6055         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6056                 hclge_del_all_fd_entries(handle, true);
6057 #endif
6058 }
6059
6060 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6061 {
6062         struct hclge_vport *vport = hclge_get_vport(handle);
6063         struct hclge_dev *hdev = vport->back;
6064
6065         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6066                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6067 }
6068
6069 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6070 {
6071         struct hclge_vport *vport = hclge_get_vport(handle);
6072         struct hclge_dev *hdev = vport->back;
6073
6074         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6075 }
6076
6077 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6078 {
6079         struct hclge_vport *vport = hclge_get_vport(handle);
6080         struct hclge_dev *hdev = vport->back;
6081
6082         return hdev->rst_stats.hw_reset_done_cnt;
6083 }
6084
6085 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6086 {
6087         struct hclge_vport *vport = hclge_get_vport(handle);
6088         struct hclge_dev *hdev = vport->back;
6089         bool clear;
6090
6091         hdev->fd_en = enable;
6092         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6093         if (!enable)
6094                 hclge_del_all_fd_entries(handle, clear);
6095         else
6096                 hclge_restore_fd_entries(handle);
6097 }
6098
6099 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6100 {
6101         struct hclge_desc desc;
6102         struct hclge_config_mac_mode_cmd *req =
6103                 (struct hclge_config_mac_mode_cmd *)desc.data;
6104         u32 loop_en = 0;
6105         int ret;
6106
6107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6108
6109         if (enable) {
6110                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6111                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6112                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6113                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6114                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6115                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6116                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6117                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6118                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6119                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6120         }
6121
6122         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6123
6124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6125         if (ret)
6126                 dev_err(&hdev->pdev->dev,
6127                         "mac enable fail, ret =%d.\n", ret);
6128 }
6129
6130 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6131 {
6132         struct hclge_config_mac_mode_cmd *req;
6133         struct hclge_desc desc;
6134         u32 loop_en;
6135         int ret;
6136
6137         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6138         /* 1 Read out the MAC mode config at first */
6139         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6140         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6141         if (ret) {
6142                 dev_err(&hdev->pdev->dev,
6143                         "mac loopback get fail, ret =%d.\n", ret);
6144                 return ret;
6145         }
6146
6147         /* 2 Then setup the loopback flag */
6148         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6149         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6150         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6151         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6152
6153         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6154
6155         /* 3 Config mac work mode with the loopback flag
6156          * and its original configuration parameters
6157          */
6158         hclge_cmd_reuse_desc(&desc, false);
6159         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6160         if (ret)
6161                 dev_err(&hdev->pdev->dev,
6162                         "mac loopback set fail, ret =%d.\n", ret);
6163         return ret;
6164 }
6165
6166 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6167                                      enum hnae3_loop loop_mode)
6168 {
6169 #define HCLGE_SERDES_RETRY_MS   10
6170 #define HCLGE_SERDES_RETRY_NUM  100
6171
6172 #define HCLGE_MAC_LINK_STATUS_MS   10
6173 #define HCLGE_MAC_LINK_STATUS_NUM  100
6174 #define HCLGE_MAC_LINK_STATUS_DOWN 0
6175 #define HCLGE_MAC_LINK_STATUS_UP   1
6176
6177         struct hclge_serdes_lb_cmd *req;
6178         struct hclge_desc desc;
6179         int mac_link_ret = 0;
6180         int ret, i = 0;
6181         u8 loop_mode_b;
6182
6183         req = (struct hclge_serdes_lb_cmd *)desc.data;
6184         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6185
6186         switch (loop_mode) {
6187         case HNAE3_LOOP_SERIAL_SERDES:
6188                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6189                 break;
6190         case HNAE3_LOOP_PARALLEL_SERDES:
6191                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6192                 break;
6193         default:
6194                 dev_err(&hdev->pdev->dev,
6195                         "unsupported serdes loopback mode %d\n", loop_mode);
6196                 return -ENOTSUPP;
6197         }
6198
6199         if (en) {
6200                 req->enable = loop_mode_b;
6201                 req->mask = loop_mode_b;
6202                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
6203         } else {
6204                 req->mask = loop_mode_b;
6205                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
6206         }
6207
6208         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6209         if (ret) {
6210                 dev_err(&hdev->pdev->dev,
6211                         "serdes loopback set fail, ret = %d\n", ret);
6212                 return ret;
6213         }
6214
6215         do {
6216                 msleep(HCLGE_SERDES_RETRY_MS);
6217                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6218                                            true);
6219                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6220                 if (ret) {
6221                         dev_err(&hdev->pdev->dev,
6222                                 "serdes loopback get fail, ret = %d\n", ret);
6223                         return ret;
6224                 }
6225         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6226                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6227
6228         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6229                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6230                 return -EBUSY;
6231         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6232                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6233                 return -EIO;
6234         }
6235
6236         hclge_cfg_mac_mode(hdev, en);
6237
6238         i = 0;
6239         do {
6240                 /* serdes internal loopback, independent of the network cable. */
6241                 msleep(HCLGE_MAC_LINK_STATUS_MS);
6242                 ret = hclge_get_mac_link_status(hdev);
6243                 if (ret == mac_link_ret)
6244                         return 0;
6245         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6246
6247         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6248
6249         return -EBUSY;
6250 }
6251
6252 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6253                             int stream_id, bool enable)
6254 {
6255         struct hclge_desc desc;
6256         struct hclge_cfg_com_tqp_queue_cmd *req =
6257                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6258         int ret;
6259
6260         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6261         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6262         req->stream_id = cpu_to_le16(stream_id);
6263         if (enable)
6264                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6265
6266         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6267         if (ret)
6268                 dev_err(&hdev->pdev->dev,
6269                         "Tqp enable fail, status =%d.\n", ret);
6270         return ret;
6271 }
6272
6273 static int hclge_set_loopback(struct hnae3_handle *handle,
6274                               enum hnae3_loop loop_mode, bool en)
6275 {
6276         struct hclge_vport *vport = hclge_get_vport(handle);
6277         struct hnae3_knic_private_info *kinfo;
6278         struct hclge_dev *hdev = vport->back;
6279         int i, ret;
6280
6281         switch (loop_mode) {
6282         case HNAE3_LOOP_APP:
6283                 ret = hclge_set_app_loopback(hdev, en);
6284                 break;
6285         case HNAE3_LOOP_SERIAL_SERDES:
6286         case HNAE3_LOOP_PARALLEL_SERDES:
6287                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6288                 break;
6289         default:
6290                 ret = -ENOTSUPP;
6291                 dev_err(&hdev->pdev->dev,
6292                         "loop_mode %d is not supported\n", loop_mode);
6293                 break;
6294         }
6295
6296         if (ret)
6297                 return ret;
6298
6299         kinfo = &vport->nic.kinfo;
6300         for (i = 0; i < kinfo->num_tqps; i++) {
6301                 ret = hclge_tqp_enable(hdev, i, 0, en);
6302                 if (ret)
6303                         return ret;
6304         }
6305
6306         return 0;
6307 }
6308
6309 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6310 {
6311         struct hclge_vport *vport = hclge_get_vport(handle);
6312         struct hnae3_knic_private_info *kinfo;
6313         struct hnae3_queue *queue;
6314         struct hclge_tqp *tqp;
6315         int i;
6316
6317         kinfo = &vport->nic.kinfo;
6318         for (i = 0; i < kinfo->num_tqps; i++) {
6319                 queue = handle->kinfo.tqp[i];
6320                 tqp = container_of(queue, struct hclge_tqp, q);
6321                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6322         }
6323 }
6324
6325 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6326 {
6327         struct hclge_vport *vport = hclge_get_vport(handle);
6328         struct hclge_dev *hdev = vport->back;
6329
6330         if (enable) {
6331                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6332         } else {
6333                 /* Set the DOWN flag here to prevent the service from being
6334                  * scheduled again
6335                  */
6336                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6337                 cancel_delayed_work_sync(&hdev->service_task);
6338                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6339         }
6340 }
6341
6342 static int hclge_ae_start(struct hnae3_handle *handle)
6343 {
6344         struct hclge_vport *vport = hclge_get_vport(handle);
6345         struct hclge_dev *hdev = vport->back;
6346
6347         /* mac enable */
6348         hclge_cfg_mac_mode(hdev, true);
6349         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6350         hdev->hw.mac.link = 0;
6351
6352         /* reset tqp stats */
6353         hclge_reset_tqp_stats(handle);
6354
6355         hclge_mac_start_phy(hdev);
6356
6357         return 0;
6358 }
6359
6360 static void hclge_ae_stop(struct hnae3_handle *handle)
6361 {
6362         struct hclge_vport *vport = hclge_get_vport(handle);
6363         struct hclge_dev *hdev = vport->back;
6364         int i;
6365
6366         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6367
6368         hclge_clear_arfs_rules(handle);
6369
6370         /* If it is not a PF reset, the firmware will disable the MAC,
6371          * so it only needs to stop the phy here.
6372          */
6373         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6374             hdev->reset_type != HNAE3_FUNC_RESET) {
6375                 hclge_mac_stop_phy(hdev);
6376                 hclge_update_link_status(hdev);
6377                 return;
6378         }
6379
6380         for (i = 0; i < handle->kinfo.num_tqps; i++)
6381                 hclge_reset_tqp(handle, i);
6382
6383         /* Mac disable */
6384         hclge_cfg_mac_mode(hdev, false);
6385
6386         hclge_mac_stop_phy(hdev);
6387
6388         /* reset tqp stats */
6389         hclge_reset_tqp_stats(handle);
6390         hclge_update_link_status(hdev);
6391 }
6392
6393 int hclge_vport_start(struct hclge_vport *vport)
6394 {
6395         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6396         vport->last_active_jiffies = jiffies;
6397         return 0;
6398 }
6399
6400 void hclge_vport_stop(struct hclge_vport *vport)
6401 {
6402         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6403 }
6404
6405 static int hclge_client_start(struct hnae3_handle *handle)
6406 {
6407         struct hclge_vport *vport = hclge_get_vport(handle);
6408
6409         return hclge_vport_start(vport);
6410 }
6411
6412 static void hclge_client_stop(struct hnae3_handle *handle)
6413 {
6414         struct hclge_vport *vport = hclge_get_vport(handle);
6415
6416         hclge_vport_stop(vport);
6417 }
6418
6419 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6420                                          u16 cmdq_resp, u8  resp_code,
6421                                          enum hclge_mac_vlan_tbl_opcode op)
6422 {
6423         struct hclge_dev *hdev = vport->back;
6424
6425         if (cmdq_resp) {
6426                 dev_err(&hdev->pdev->dev,
6427                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6428                         cmdq_resp);
6429                 return -EIO;
6430         }
6431
6432         if (op == HCLGE_MAC_VLAN_ADD) {
6433                 if ((!resp_code) || (resp_code == 1)) {
6434                         return 0;
6435                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6436                         dev_err(&hdev->pdev->dev,
6437                                 "add mac addr failed for uc_overflow.\n");
6438                         return -ENOSPC;
6439                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6440                         dev_err(&hdev->pdev->dev,
6441                                 "add mac addr failed for mc_overflow.\n");
6442                         return -ENOSPC;
6443                 }
6444
6445                 dev_err(&hdev->pdev->dev,
6446                         "add mac addr failed for undefined, code=%u.\n",
6447                         resp_code);
6448                 return -EIO;
6449         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6450                 if (!resp_code) {
6451                         return 0;
6452                 } else if (resp_code == 1) {
6453                         dev_dbg(&hdev->pdev->dev,
6454                                 "remove mac addr failed for miss.\n");
6455                         return -ENOENT;
6456                 }
6457
6458                 dev_err(&hdev->pdev->dev,
6459                         "remove mac addr failed for undefined, code=%u.\n",
6460                         resp_code);
6461                 return -EIO;
6462         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6463                 if (!resp_code) {
6464                         return 0;
6465                 } else if (resp_code == 1) {
6466                         dev_dbg(&hdev->pdev->dev,
6467                                 "lookup mac addr failed for miss.\n");
6468                         return -ENOENT;
6469                 }
6470
6471                 dev_err(&hdev->pdev->dev,
6472                         "lookup mac addr failed for undefined, code=%u.\n",
6473                         resp_code);
6474                 return -EIO;
6475         }
6476
6477         dev_err(&hdev->pdev->dev,
6478                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6479
6480         return -EINVAL;
6481 }
6482
6483 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6484 {
6485 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6486
6487         unsigned int word_num;
6488         unsigned int bit_num;
6489
6490         if (vfid > 255 || vfid < 0)
6491                 return -EIO;
6492
6493         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6494                 word_num = vfid / 32;
6495                 bit_num  = vfid % 32;
6496                 if (clr)
6497                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6498                 else
6499                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6500         } else {
6501                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6502                 bit_num  = vfid % 32;
6503                 if (clr)
6504                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6505                 else
6506                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6507         }
6508
6509         return 0;
6510 }
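/* Worked example for the bitmap layout above (hypothetical vfids): with
 * HCLGE_VF_NUM_IN_FIRST_DESC = 192, function ids 0-191 land in desc[1] and
 * the rest in desc[2], 32 ids per data word:
 *
 *     vfid = 70  -> desc[1].data[70 / 32] bit (70 % 32), i.e. data[2] bit 6
 *     vfid = 200 -> desc[2].data[(200 - 192) / 32] bit (200 % 32),
 *                   i.e. data[0] bit 8
 */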
6511
6512 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6513 {
6514 #define HCLGE_DESC_NUMBER 3
6515 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6516         int i, j;
6517
6518         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6519                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6520                         if (desc[i].data[j])
6521                                 return false;
6522
6523         return true;
6524 }
6525
6526 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6527                                    const u8 *addr, bool is_mc)
6528 {
6529         const unsigned char *mac_addr = addr;
6530         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6531                        (mac_addr[0]) | (mac_addr[1] << 8);
6532         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6533
6534         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6535         if (is_mc) {
6536                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6537                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6538         }
6539
6540         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6541         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6542 }
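/* Worked example for the packing above (hypothetical address): for
 * addr = 00:11:22:33:44:55,
 *
 *     high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100
 *     low_val  = 0x44 | (0x55 << 8)                         = 0x5544
 *
 * so byte 0 of the MAC ends up in the least significant byte of
 * mac_addr_hi32 and byte 5 in the most significant byte of mac_addr_lo16,
 * before the cpu_to_le32()/cpu_to_le16() conversions.
 */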
6543
6544 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6545                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6546 {
6547         struct hclge_dev *hdev = vport->back;
6548         struct hclge_desc desc;
6549         u8 resp_code;
6550         u16 retval;
6551         int ret;
6552
6553         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6554
6555         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6556
6557         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6558         if (ret) {
6559                 dev_err(&hdev->pdev->dev,
6560                         "del mac addr failed for cmd_send, ret =%d.\n",
6561                         ret);
6562                 return ret;
6563         }
6564         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6565         retval = le16_to_cpu(desc.retval);
6566
6567         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6568                                              HCLGE_MAC_VLAN_REMOVE);
6569 }
6570
6571 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6572                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6573                                      struct hclge_desc *desc,
6574                                      bool is_mc)
6575 {
6576         struct hclge_dev *hdev = vport->back;
6577         u8 resp_code;
6578         u16 retval;
6579         int ret;
6580
6581         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6582         if (is_mc) {
6583                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6584                 memcpy(desc[0].data,
6585                        req,
6586                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6587                 hclge_cmd_setup_basic_desc(&desc[1],
6588                                            HCLGE_OPC_MAC_VLAN_ADD,
6589                                            true);
6590                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6591                 hclge_cmd_setup_basic_desc(&desc[2],
6592                                            HCLGE_OPC_MAC_VLAN_ADD,
6593                                            true);
6594                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6595         } else {
6596                 memcpy(desc[0].data,
6597                        req,
6598                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6599                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6600         }
6601         if (ret) {
6602                 dev_err(&hdev->pdev->dev,
6603                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6604                         ret);
6605                 return ret;
6606         }
6607         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6608         retval = le16_to_cpu(desc[0].retval);
6609
6610         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6611                                              HCLGE_MAC_VLAN_LKUP);
6612 }
6613
6614 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6615                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6616                                   struct hclge_desc *mc_desc)
6617 {
6618         struct hclge_dev *hdev = vport->back;
6619         int cfg_status;
6620         u8 resp_code;
6621         u16 retval;
6622         int ret;
6623
6624         if (!mc_desc) {
6625                 struct hclge_desc desc;
6626
6627                 hclge_cmd_setup_basic_desc(&desc,
6628                                            HCLGE_OPC_MAC_VLAN_ADD,
6629                                            false);
6630                 memcpy(desc.data, req,
6631                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6632                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6633                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6634                 retval = le16_to_cpu(desc.retval);
6635
6636                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6637                                                            resp_code,
6638                                                            HCLGE_MAC_VLAN_ADD);
6639         } else {
6640                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6641                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6642                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6643                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6644                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6645                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6646                 memcpy(mc_desc[0].data, req,
6647                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6648                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6649                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6650                 retval = le16_to_cpu(mc_desc[0].retval);
6651
6652                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6653                                                            resp_code,
6654                                                            HCLGE_MAC_VLAN_ADD);
6655         }
6656
6657         if (ret) {
6658                 dev_err(&hdev->pdev->dev,
6659                         "add mac addr failed for cmd_send, ret =%d.\n",
6660                         ret);
6661                 return ret;
6662         }
6663
6664         return cfg_status;
6665 }
6666
6667 static int hclge_init_umv_space(struct hclge_dev *hdev)
6668 {
6669         u16 allocated_size = 0;
6670         int ret;
6671
6672         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6673                                   true);
6674         if (ret)
6675                 return ret;
6676
6677         if (allocated_size < hdev->wanted_umv_size)
6678                 dev_warn(&hdev->pdev->dev,
6679                          "Alloc umv space failed, want %d, get %d\n",
6680                          hdev->wanted_umv_size, allocated_size);
6681
6682         mutex_init(&hdev->umv_mutex);
6683         hdev->max_umv_size = allocated_size;
6684         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6685          * preserve some unicast mac vlan table entries shared by pf
6686          * and its vfs.
6687          */
6688         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6689         hdev->share_umv_size = hdev->priv_umv_size +
6690                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6691
6692         return 0;
6693 }
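/* Worked example for the split above (hypothetical sizes): if the firmware
 * grants allocated_size = 255 entries and hdev->num_req_vfs = 6, then
 *
 *     priv_umv_size  = 255 / (6 + 2)        = 31
 *     share_umv_size = 31 + (255 % (6 + 2)) = 38
 *
 * so the pf and each vf get 31 private entries and fall back to the shared
 * pool once those are used up (see hclge_is_umv_space_full() and
 * hclge_update_umv_space() below).
 */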
6694
6695 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6696 {
6697         int ret;
6698
6699         if (hdev->max_umv_size > 0) {
6700                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6701                                           false);
6702                 if (ret)
6703                         return ret;
6704                 hdev->max_umv_size = 0;
6705         }
6706         mutex_destroy(&hdev->umv_mutex);
6707
6708         return 0;
6709 }
6710
6711 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6712                                u16 *allocated_size, bool is_alloc)
6713 {
6714         struct hclge_umv_spc_alc_cmd *req;
6715         struct hclge_desc desc;
6716         int ret;
6717
6718         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6719         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6720         if (!is_alloc)
6721                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6722
6723         req->space_size = cpu_to_le32(space_size);
6724
6725         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6726         if (ret) {
6727                 dev_err(&hdev->pdev->dev,
6728                         "%s umv space failed for cmd_send, ret =%d\n",
6729                         is_alloc ? "allocate" : "free", ret);
6730                 return ret;
6731         }
6732
6733         if (is_alloc && allocated_size)
6734                 *allocated_size = le32_to_cpu(desc.data[1]);
6735
6736         return 0;
6737 }
6738
6739 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6740 {
6741         struct hclge_vport *vport;
6742         int i;
6743
6744         for (i = 0; i < hdev->num_alloc_vport; i++) {
6745                 vport = &hdev->vport[i];
6746                 vport->used_umv_num = 0;
6747         }
6748
6749         mutex_lock(&hdev->umv_mutex);
6750         hdev->share_umv_size = hdev->priv_umv_size +
6751                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6752         mutex_unlock(&hdev->umv_mutex);
6753 }
6754
6755 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6756 {
6757         struct hclge_dev *hdev = vport->back;
6758         bool is_full;
6759
6760         mutex_lock(&hdev->umv_mutex);
6761         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6762                    hdev->share_umv_size == 0);
6763         mutex_unlock(&hdev->umv_mutex);
6764
6765         return is_full;
6766 }
6767
6768 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6769 {
6770         struct hclge_dev *hdev = vport->back;
6771
6772         mutex_lock(&hdev->umv_mutex);
6773         if (is_free) {
6774                 if (vport->used_umv_num > hdev->priv_umv_size)
6775                         hdev->share_umv_size++;
6776
6777                 if (vport->used_umv_num > 0)
6778                         vport->used_umv_num--;
6779         } else {
6780                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6781                     hdev->share_umv_size > 0)
6782                         hdev->share_umv_size--;
6783                 vport->used_umv_num++;
6784         }
6785         mutex_unlock(&hdev->umv_mutex);
6786 }
6787
6788 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6789                              const unsigned char *addr)
6790 {
6791         struct hclge_vport *vport = hclge_get_vport(handle);
6792
6793         return hclge_add_uc_addr_common(vport, addr);
6794 }
6795
6796 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6797                              const unsigned char *addr)
6798 {
6799         struct hclge_dev *hdev = vport->back;
6800         struct hclge_mac_vlan_tbl_entry_cmd req;
6801         struct hclge_desc desc;
6802         u16 egress_port = 0;
6803         int ret;
6804
6805         /* mac addr check */
6806         if (is_zero_ether_addr(addr) ||
6807             is_broadcast_ether_addr(addr) ||
6808             is_multicast_ether_addr(addr)) {
6809                 dev_err(&hdev->pdev->dev,
6810                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6811                          addr, is_zero_ether_addr(addr),
6812                          is_broadcast_ether_addr(addr),
6813                          is_multicast_ether_addr(addr));
6814                 return -EINVAL;
6815         }
6816
6817         memset(&req, 0, sizeof(req));
6818
6819         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6820                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6821
6822         req.egress_port = cpu_to_le16(egress_port);
6823
6824         hclge_prepare_mac_addr(&req, addr, false);
6825
6826         /* Look up the mac address in the mac_vlan table, and add
6827          * it if the entry does not exist. Duplicate unicast entries
6828          * are not allowed in the mac vlan table.
6829          */
6830         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6831         if (ret == -ENOENT) {
6832                 if (!hclge_is_umv_space_full(vport)) {
6833                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6834                         if (!ret)
6835                                 hclge_update_umv_space(vport, false);
6836                         return ret;
6837                 }
6838
6839                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6840                         hdev->priv_umv_size);
6841
6842                 return -ENOSPC;
6843         }
6844
6845         /* check if we just hit the duplicate */
6846         if (!ret) {
6847                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6848                          vport->vport_id, addr);
6849                 return 0;
6850         }
6851
6852         dev_err(&hdev->pdev->dev,
6853                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6854                 addr);
6855
6856         return ret;
6857 }
6858
6859 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6860                             const unsigned char *addr)
6861 {
6862         struct hclge_vport *vport = hclge_get_vport(handle);
6863
6864         return hclge_rm_uc_addr_common(vport, addr);
6865 }
6866
6867 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6868                             const unsigned char *addr)
6869 {
6870         struct hclge_dev *hdev = vport->back;
6871         struct hclge_mac_vlan_tbl_entry_cmd req;
6872         int ret;
6873
6874         /* mac addr check */
6875         if (is_zero_ether_addr(addr) ||
6876             is_broadcast_ether_addr(addr) ||
6877             is_multicast_ether_addr(addr)) {
6878                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6879                         addr);
6880                 return -EINVAL;
6881         }
6882
6883         memset(&req, 0, sizeof(req));
6884         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6885         hclge_prepare_mac_addr(&req, addr, false);
6886         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6887         if (!ret)
6888                 hclge_update_umv_space(vport, true);
6889
6890         return ret;
6891 }
6892
6893 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6894                              const unsigned char *addr)
6895 {
6896         struct hclge_vport *vport = hclge_get_vport(handle);
6897
6898         return hclge_add_mc_addr_common(vport, addr);
6899 }
6900
6901 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6902                              const unsigned char *addr)
6903 {
6904         struct hclge_dev *hdev = vport->back;
6905         struct hclge_mac_vlan_tbl_entry_cmd req;
6906         struct hclge_desc desc[3];
6907         int status;
6908
6909         /* mac addr check */
6910         if (!is_multicast_ether_addr(addr)) {
6911                 dev_err(&hdev->pdev->dev,
6912                         "Add mc mac err! invalid mac:%pM.\n",
6913                          addr);
6914                 return -EINVAL;
6915         }
6916         memset(&req, 0, sizeof(req));
6917         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6918         hclge_prepare_mac_addr(&req, addr, true);
6919         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6920         if (status) {
6921                 /* This mac addr does not exist, add a new entry for it */
6922                 memset(desc[0].data, 0, sizeof(desc[0].data));
6923                 memset(desc[1].data, 0, sizeof(desc[1].data));
6924                 memset(desc[2].data, 0, sizeof(desc[2].data));
6925         }
6926         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6927         if (status)
6928                 return status;
6929         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6930
6931         if (status == -ENOSPC)
6932                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6933
6934         return status;
6935 }
6936
6937 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6938                             const unsigned char *addr)
6939 {
6940         struct hclge_vport *vport = hclge_get_vport(handle);
6941
6942         return hclge_rm_mc_addr_common(vport, addr);
6943 }
6944
6945 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6946                             const unsigned char *addr)
6947 {
6948         struct hclge_dev *hdev = vport->back;
6949         struct hclge_mac_vlan_tbl_entry_cmd req;
6950         enum hclge_cmd_status status;
6951         struct hclge_desc desc[3];
6952
6953         /* mac addr check */
6954         if (!is_multicast_ether_addr(addr)) {
6955                 dev_dbg(&hdev->pdev->dev,
6956                         "Remove mc mac err! invalid mac:%pM.\n",
6957                          addr);
6958                 return -EINVAL;
6959         }
6960
6961         memset(&req, 0, sizeof(req));
6962         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6963         hclge_prepare_mac_addr(&req, addr, true);
6964         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6965         if (!status) {
6966                 /* This mac addr exists, remove this handle's VFID for it */
6967                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6968                 if (status)
6969                         return status;
6970
6971                 if (hclge_is_all_function_id_zero(desc))
6972                         /* All the vfids are zero, so delete this entry */
6973                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6974                 else
6975                         /* Not all the vfids are zero, just update the vfid bitmap */
6976                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6977
6978         } else {
6979                 /* Maybe this mac address is in the mta table, but it cannot
6980                  * be deleted here because an mta entry represents an address
6981                  * range rather than a specific address. The delete action for
6982                  * all entries will take effect in update_mta_status, called
6983                  * by hns3_nic_set_rx_mode.
6984                  */
6985                 status = 0;
6986         }
6987
6988         return status;
6989 }
6990
6991 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6992                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6993 {
6994         struct hclge_vport_mac_addr_cfg *mac_cfg;
6995         struct list_head *list;
6996
6997         if (!vport->vport_id)
6998                 return;
6999
7000         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7001         if (!mac_cfg)
7002                 return;
7003
7004         mac_cfg->hd_tbl_status = true;
7005         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7006
7007         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7008                &vport->uc_mac_list : &vport->mc_mac_list;
7009
7010         list_add_tail(&mac_cfg->node, list);
7011 }
7012
7013 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7014                               bool is_write_tbl,
7015                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7016 {
7017         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7018         struct list_head *list;
7019         bool uc_flag, mc_flag;
7020
7021         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7022                &vport->uc_mac_list : &vport->mc_mac_list;
7023
7024         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7025         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7026
7027         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7028                 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
7029                         if (uc_flag && mac_cfg->hd_tbl_status)
7030                                 hclge_rm_uc_addr_common(vport, mac_addr);
7031
7032                         if (mc_flag && mac_cfg->hd_tbl_status)
7033                                 hclge_rm_mc_addr_common(vport, mac_addr);
7034
7035                         list_del(&mac_cfg->node);
7036                         kfree(mac_cfg);
7037                         break;
7038                 }
7039         }
7040 }
7041
7042 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7043                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7044 {
7045         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7046         struct list_head *list;
7047
7048         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7049                &vport->uc_mac_list : &vport->mc_mac_list;
7050
7051         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7052                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7053                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7054
7055                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7056                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7057
7058                 mac_cfg->hd_tbl_status = false;
7059                 if (is_del_list) {
7060                         list_del(&mac_cfg->node);
7061                         kfree(mac_cfg);
7062                 }
7063         }
7064 }
7065
7066 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7067 {
7068         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7069         struct hclge_vport *vport;
7070         int i;
7071
7072         mutex_lock(&hdev->vport_cfg_mutex);
7073         for (i = 0; i < hdev->num_alloc_vport; i++) {
7074                 vport = &hdev->vport[i];
7075                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7076                         list_del(&mac->node);
7077                         kfree(mac);
7078                 }
7079
7080                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7081                         list_del(&mac->node);
7082                         kfree(mac);
7083                 }
7084         }
7085         mutex_unlock(&hdev->vport_cfg_mutex);
7086 }
7087
7088 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7089                                               u16 cmdq_resp, u8 resp_code)
7090 {
7091 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7092 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7093 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7094 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7095
7096         int return_status;
7097
7098         if (cmdq_resp) {
7099                 dev_err(&hdev->pdev->dev,
7100                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7101                         cmdq_resp);
7102                 return -EIO;
7103         }
7104
7105         switch (resp_code) {
7106         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7107         case HCLGE_ETHERTYPE_ALREADY_ADD:
7108                 return_status = 0;
7109                 break;
7110         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7111                 dev_err(&hdev->pdev->dev,
7112                         "add mac ethertype failed for manager table overflow.\n");
7113                 return_status = -EIO;
7114                 break;
7115         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7116                 dev_err(&hdev->pdev->dev,
7117                         "add mac ethertype failed for key conflict.\n");
7118                 return_status = -EIO;
7119                 break;
7120         default:
7121                 dev_err(&hdev->pdev->dev,
7122                         "add mac ethertype failed for undefined, code=%d.\n",
7123                         resp_code);
7124                 return_status = -EIO;
7125         }
7126
7127         return return_status;
7128 }
7129
7130 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7131                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7132 {
7133         struct hclge_desc desc;
7134         u8 resp_code;
7135         u16 retval;
7136         int ret;
7137
7138         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7139         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7140
7141         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7142         if (ret) {
7143                 dev_err(&hdev->pdev->dev,
7144                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7145                         ret);
7146                 return ret;
7147         }
7148
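             /* the firmware reply carries the per-entry result in byte 1 of
              * the first data word; desc.retval is the command queue status
              */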
7149         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7150         retval = le16_to_cpu(desc.retval);
7151
7152         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7153 }
7154
7155 static int init_mgr_tbl(struct hclge_dev *hdev)
7156 {
7157         int ret;
7158         int i;
7159
7160         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7161                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7162                 if (ret) {
7163                         dev_err(&hdev->pdev->dev,
7164                                 "add mac ethertype failed, ret =%d.\n",
7165                                 ret);
7166                         return ret;
7167                 }
7168         }
7169
7170         return 0;
7171 }
7172
7173 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7174 {
7175         struct hclge_vport *vport = hclge_get_vport(handle);
7176         struct hclge_dev *hdev = vport->back;
7177
7178         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7179 }
7180
7181 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7182                               bool is_first)
7183 {
7184         const unsigned char *new_addr = (const unsigned char *)p;
7185         struct hclge_vport *vport = hclge_get_vport(handle);
7186         struct hclge_dev *hdev = vport->back;
7187         int ret;
7188
7189         /* mac addr check */
7190         if (is_zero_ether_addr(new_addr) ||
7191             is_broadcast_ether_addr(new_addr) ||
7192             is_multicast_ether_addr(new_addr)) {
7193                 dev_err(&hdev->pdev->dev,
7194                         "Change uc mac err! invalid mac:%pM.\n",
7195                          new_addr);
7196                 return -EINVAL;
7197         }
7198
7199         if ((!is_first || is_kdump_kernel()) &&
7200             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7201                 dev_warn(&hdev->pdev->dev,
7202                          "remove old uc mac address fail.\n");
7203
7204         ret = hclge_add_uc_addr(handle, new_addr);
7205         if (ret) {
7206                 dev_err(&hdev->pdev->dev,
7207                         "add uc mac address fail, ret =%d.\n",
7208                         ret);
7209
7210                 if (!is_first &&
7211                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7212                         dev_err(&hdev->pdev->dev,
7213                                 "restore uc mac address fail.\n");
7214
7215                 return -EIO;
7216         }
7217
7218         ret = hclge_pause_addr_cfg(hdev, new_addr);
7219         if (ret) {
7220                 dev_err(&hdev->pdev->dev,
7221                         "configure mac pause address fail, ret =%d.\n",
7222                         ret);
7223                 return -EIO;
7224         }
7225
7226         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7227
7228         return 0;
7229 }
7230
7231 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7232                           int cmd)
7233 {
7234         struct hclge_vport *vport = hclge_get_vport(handle);
7235         struct hclge_dev *hdev = vport->back;
7236
7237         if (!hdev->hw.mac.phydev)
7238                 return -EOPNOTSUPP;
7239
7240         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7241 }
7242
7243 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7244                                       u8 fe_type, bool filter_en, u8 vf_id)
7245 {
7246         struct hclge_vlan_filter_ctrl_cmd *req;
7247         struct hclge_desc desc;
7248         int ret;
7249
7250         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7251
7252         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7253         req->vlan_type = vlan_type;
7254         req->vlan_fe = filter_en ? fe_type : 0;
7255         req->vf_id = vf_id;
7256
7257         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7258         if (ret)
7259                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7260                         ret);
7261
7262         return ret;
7263 }
7264
7265 #define HCLGE_FILTER_TYPE_VF            0
7266 #define HCLGE_FILTER_TYPE_PORT          1
7267 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7268 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7269 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7270 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7271 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7272 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7273                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7274 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7275                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7276
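     /* Revision 0x21 and later use separate NIC/RoCE ingress and egress
      * filter enable bits; revision 0x20 only has the single V1 egress bit.
      */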
7277 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7278 {
7279         struct hclge_vport *vport = hclge_get_vport(handle);
7280         struct hclge_dev *hdev = vport->back;
7281
7282         if (hdev->pdev->revision >= 0x21) {
7283                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7284                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7285                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7286                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7287         } else {
7288                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7289                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7290                                            0);
7291         }
7292         if (enable)
7293                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7294         else
7295                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7296 }
7297
7298 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7299                                     bool is_kill, u16 vlan, u8 qos,
7300                                     __be16 proto)
7301 {
7302 #define HCLGE_MAX_VF_BYTES  16
7303         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7304         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7305         struct hclge_desc desc[2];
7306         u8 vf_byte_val;
7307         u8 vf_byte_off;
7308         int ret;
7309
7310         /* If the vf vlan table is full, the firmware disables the vf vlan
7311          * filter; adding a new vlan id is then impossible and unnecessary.
7312          */
7313         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7314                 return 0;
7315
7316         hclge_cmd_setup_basic_desc(&desc[0],
7317                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7318         hclge_cmd_setup_basic_desc(&desc[1],
7319                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7320
7321         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7322
7323         vf_byte_off = vfid / 8;
7324         vf_byte_val = 1 << (vfid % 8);
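             /* e.g. vfid 10 -> byte 1, bit value 0x04 in the vf bitmap */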
7325
7326         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7327         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7328
7329         req0->vlan_id  = cpu_to_le16(vlan);
7330         req0->vlan_cfg = is_kill;
7331
7332         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7333                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7334         else
7335                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7336
7337         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7338         if (ret) {
7339                 dev_err(&hdev->pdev->dev,
7340                         "Send vf vlan command fail, ret =%d.\n",
7341                         ret);
7342                 return ret;
7343         }
7344
7345         if (!is_kill) {
7346 #define HCLGE_VF_VLAN_NO_ENTRY  2
7347                 if (!req0->resp_code || req0->resp_code == 1)
7348                         return 0;
7349
7350                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7351                         set_bit(vfid, hdev->vf_vlan_full);
7352                         dev_warn(&hdev->pdev->dev,
7353                                  "vf vlan table is full, vf vlan filter is disabled\n");
7354                         return 0;
7355                 }
7356
7357                 dev_err(&hdev->pdev->dev,
7358                         "Add vf vlan filter fail, ret =%d.\n",
7359                         req0->resp_code);
7360         } else {
7361 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7362                 if (!req0->resp_code)
7363                         return 0;
7364
7365                 /* The vf vlan filter is disabled when the vf vlan table is
7366                  * full, so new vlan ids are not added to the table. Just
7367                  * return 0 without a warning to avoid massive verbose logs
7368                  * at unload time.
7369                  */
7370                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7371                         return 0;
7372
7373                 dev_err(&hdev->pdev->dev,
7374                         "Kill vf vlan filter fail, ret =%d.\n",
7375                         req0->resp_code);
7376         }
7377
7378         return -EIO;
7379 }
7380
7381 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7382                                       u16 vlan_id, bool is_kill)
7383 {
7384         struct hclge_vlan_filter_pf_cfg_cmd *req;
7385         struct hclge_desc desc;
7386         u8 vlan_offset_byte_val;
7387         u8 vlan_offset_byte;
7388         u8 vlan_offset_160;
7389         int ret;
7390
7391         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7392
7393         vlan_offset_160 = vlan_id / 160;
7394         vlan_offset_byte = (vlan_id % 160) / 8;
7395         vlan_offset_byte_val = 1 << (vlan_id % 8);
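             /* e.g. vlan_id 500 -> offset 3, byte 2, bit value 0x10 */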
7396
7397         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7398         req->vlan_offset = vlan_offset_160;
7399         req->vlan_cfg = is_kill;
7400         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7401
7402         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7403         if (ret)
7404                 dev_err(&hdev->pdev->dev,
7405                         "port vlan command, send fail, ret =%d.\n", ret);
7406         return ret;
7407 }
7408
7409 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7410                                     u16 vport_id, u16 vlan_id, u8 qos,
7411                                     bool is_kill)
7412 {
7413         u16 vport_idx, vport_num = 0;
7414         int ret;
7415
7416         if (is_kill && !vlan_id)
7417                 return 0;
7418
7419         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7420                                        0, proto);
7421         if (ret) {
7422                 dev_err(&hdev->pdev->dev,
7423                         "Set %d vport vlan filter config fail, ret =%d.\n",
7424                         vport_id, ret);
7425                 return ret;
7426         }
7427
7428         /* vlan 0 may be added twice when the 8021q module is enabled */
7429         if (!is_kill && !vlan_id &&
7430             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7431                 return 0;
7432
7433         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7434                 dev_err(&hdev->pdev->dev,
7435                         "Add port vlan failed, vport %d is already in vlan %d\n",
7436                         vport_id, vlan_id);
7437                 return -EINVAL;
7438         }
7439
7440         if (is_kill &&
7441             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7442                 dev_err(&hdev->pdev->dev,
7443                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7444                         vport_id, vlan_id);
7445                 return -EINVAL;
7446         }
7447
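             /* only program the port wide vlan filter when the first vport
              * joins this vlan or the last one leaves it
              */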
7448         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7449                 vport_num++;
7450
7451         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7452                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7453                                                  is_kill);
7454
7455         return ret;
7456 }
7457
7458 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7459 {
7460         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7461         struct hclge_vport_vtag_tx_cfg_cmd *req;
7462         struct hclge_dev *hdev = vport->back;
7463         struct hclge_desc desc;
7464         int status;
7465
7466         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7467
7468         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7469         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7470         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7471         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7472                       vcfg->accept_tag1 ? 1 : 0);
7473         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7474                       vcfg->accept_untag1 ? 1 : 0);
7475         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7476                       vcfg->accept_tag2 ? 1 : 0);
7477         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7478                       vcfg->accept_untag2 ? 1 : 0);
7479         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7480                       vcfg->insert_tag1_en ? 1 : 0);
7481         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7482                       vcfg->insert_tag2_en ? 1 : 0);
7483         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7484
7485         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7486         req->vf_bitmap[req->vf_offset] =
7487                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7488
7489         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7490         if (status)
7491                 dev_err(&hdev->pdev->dev,
7492                         "Send port txvlan cfg command fail, ret =%d\n",
7493                         status);
7494
7495         return status;
7496 }
7497
7498 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7499 {
7500         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7501         struct hclge_vport_vtag_rx_cfg_cmd *req;
7502         struct hclge_dev *hdev = vport->back;
7503         struct hclge_desc desc;
7504         int status;
7505
7506         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7507
7508         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7509         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7510                       vcfg->strip_tag1_en ? 1 : 0);
7511         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7512                       vcfg->strip_tag2_en ? 1 : 0);
7513         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7514                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7515         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7516                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7517
7518         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7519         req->vf_bitmap[req->vf_offset] =
7520                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7521
7522         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7523         if (status)
7524                 dev_err(&hdev->pdev->dev,
7525                         "Send port rxvlan cfg command fail, ret =%d\n",
7526                         status);
7527
7528         return status;
7529 }
7530
7531 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7532                                   u16 port_base_vlan_state,
7533                                   u16 vlan_tag)
7534 {
7535         int ret;
7536
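             /* with a port based vlan, hw inserts it as tag1 on tx and tag1
              * from the stack is not accepted; otherwise tag1 from the stack
              * is accepted and nothing is inserted
              */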
7537         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7538                 vport->txvlan_cfg.accept_tag1 = true;
7539                 vport->txvlan_cfg.insert_tag1_en = false;
7540                 vport->txvlan_cfg.default_tag1 = 0;
7541         } else {
7542                 vport->txvlan_cfg.accept_tag1 = false;
7543                 vport->txvlan_cfg.insert_tag1_en = true;
7544                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7545         }
7546
7547         vport->txvlan_cfg.accept_untag1 = true;
7548
7549         /* accept_tag2 and accept_untag2 are not supported on
7550          * pdev revision 0x20; newer revisions support them, but
7551          * these two fields cannot be configured by the user.
7552          */
7553         vport->txvlan_cfg.accept_tag2 = true;
7554         vport->txvlan_cfg.accept_untag2 = true;
7555         vport->txvlan_cfg.insert_tag2_en = false;
7556         vport->txvlan_cfg.default_tag2 = 0;
7557
7558         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7559                 vport->rxvlan_cfg.strip_tag1_en = false;
7560                 vport->rxvlan_cfg.strip_tag2_en =
7561                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7562         } else {
7563                 vport->rxvlan_cfg.strip_tag1_en =
7564                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7565                 vport->rxvlan_cfg.strip_tag2_en = true;
7566         }
7567         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7568         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7569
7570         ret = hclge_set_vlan_tx_offload_cfg(vport);
7571         if (ret)
7572                 return ret;
7573
7574         return hclge_set_vlan_rx_offload_cfg(vport);
7575 }
7576
7577 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7578 {
7579         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7580         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7581         struct hclge_desc desc;
7582         int status;
7583
7584         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7585         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7586         rx_req->ot_fst_vlan_type =
7587                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7588         rx_req->ot_sec_vlan_type =
7589                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7590         rx_req->in_fst_vlan_type =
7591                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7592         rx_req->in_sec_vlan_type =
7593                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7594
7595         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7596         if (status) {
7597                 dev_err(&hdev->pdev->dev,
7598                         "Send rxvlan protocol type command fail, ret =%d\n",
7599                         status);
7600                 return status;
7601         }
7602
7603         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7604
7605         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7606         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7607         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7608
7609         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7610         if (status)
7611                 dev_err(&hdev->pdev->dev,
7612                         "Send txvlan protocol type command fail, ret =%d\n",
7613                         status);
7614
7615         return status;
7616 }
7617
7618 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7619 {
7620 #define HCLGE_DEF_VLAN_TYPE             0x8100
7621
7622         struct hnae3_handle *handle = &hdev->vport[0].nic;
7623         struct hclge_vport *vport;
7624         int ret;
7625         int i;
7626
7627         if (hdev->pdev->revision >= 0x21) {
7628                 /* for revision 0x21, vf vlan filter is per function */
7629                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7630                         vport = &hdev->vport[i];
7631                         ret = hclge_set_vlan_filter_ctrl(hdev,
7632                                                          HCLGE_FILTER_TYPE_VF,
7633                                                          HCLGE_FILTER_FE_EGRESS,
7634                                                          true,
7635                                                          vport->vport_id);
7636                         if (ret)
7637                                 return ret;
7638                 }
7639
7640                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7641                                                  HCLGE_FILTER_FE_INGRESS, true,
7642                                                  0);
7643                 if (ret)
7644                         return ret;
7645         } else {
7646                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7647                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7648                                                  true, 0);
7649                 if (ret)
7650                         return ret;
7651         }
7652
7653         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7654
7655         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7656         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7657         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7658         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7659         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7660         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7661
7662         ret = hclge_set_vlan_protocol_type(hdev);
7663         if (ret)
7664                 return ret;
7665
7666         for (i = 0; i < hdev->num_alloc_vport; i++) {
7667                 u16 vlan_tag;
7668
7669                 vport = &hdev->vport[i];
7670                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7671
7672                 ret = hclge_vlan_offload_cfg(vport,
7673                                              vport->port_base_vlan_cfg.state,
7674                                              vlan_tag);
7675                 if (ret)
7676                         return ret;
7677         }
7678
7679         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7680 }
7681
7682 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7683                                        bool writen_to_tbl)
7684 {
7685         struct hclge_vport_vlan_cfg *vlan;
7686
7687         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7688         if (!vlan)
7689                 return;
7690
7691         vlan->hd_tbl_status = writen_to_tbl;
7692         vlan->vlan_id = vlan_id;
7693
7694         list_add_tail(&vlan->node, &vport->vlan_list);
7695 }
7696
7697 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7698 {
7699         struct hclge_vport_vlan_cfg *vlan, *tmp;
7700         struct hclge_dev *hdev = vport->back;
7701         int ret;
7702
7703         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7704                 if (!vlan->hd_tbl_status) {
7705                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7706                                                        vport->vport_id,
7707                                                        vlan->vlan_id, 0, false);
7708                         if (ret) {
7709                                 dev_err(&hdev->pdev->dev,
7710                                         "restore vport vlan list failed, ret=%d\n",
7711                                         ret);
7712                                 return ret;
7713                         }
7714                 }
7715                 vlan->hd_tbl_status = true;
7716         }
7717
7718         return 0;
7719 }
7720
7721 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7722                                       bool is_write_tbl)
7723 {
7724         struct hclge_vport_vlan_cfg *vlan, *tmp;
7725         struct hclge_dev *hdev = vport->back;
7726
7727         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7728                 if (vlan->vlan_id == vlan_id) {
7729                         if (is_write_tbl && vlan->hd_tbl_status)
7730                                 hclge_set_vlan_filter_hw(hdev,
7731                                                          htons(ETH_P_8021Q),
7732                                                          vport->vport_id,
7733                                                          vlan_id, 0,
7734                                                          true);
7735
7736                         list_del(&vlan->node);
7737                         kfree(vlan);
7738                         break;
7739                 }
7740         }
7741 }
7742
7743 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7744 {
7745         struct hclge_vport_vlan_cfg *vlan, *tmp;
7746         struct hclge_dev *hdev = vport->back;
7747
7748         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7749                 if (vlan->hd_tbl_status)
7750                         hclge_set_vlan_filter_hw(hdev,
7751                                                  htons(ETH_P_8021Q),
7752                                                  vport->vport_id,
7753                                                  vlan->vlan_id, 0,
7754                                                  true);
7755
7756                 vlan->hd_tbl_status = false;
7757                 if (is_del_list) {
7758                         list_del(&vlan->node);
7759                         kfree(vlan);
7760                 }
7761         }
7762 }
7763
7764 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7765 {
7766         struct hclge_vport_vlan_cfg *vlan, *tmp;
7767         struct hclge_vport *vport;
7768         int i;
7769
7770         mutex_lock(&hdev->vport_cfg_mutex);
7771         for (i = 0; i < hdev->num_alloc_vport; i++) {
7772                 vport = &hdev->vport[i];
7773                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7774                         list_del(&vlan->node);
7775                         kfree(vlan);
7776                 }
7777         }
7778         mutex_unlock(&hdev->vport_cfg_mutex);
7779 }
7780
7781 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7782 {
7783         struct hclge_vport *vport = hclge_get_vport(handle);
7784         struct hclge_vport_vlan_cfg *vlan, *tmp;
7785         struct hclge_dev *hdev = vport->back;
7786         u16 vlan_proto, qos;
7787         u16 state, vlan_id;
7788         int i;
7789
7790         mutex_lock(&hdev->vport_cfg_mutex);
7791         for (i = 0; i < hdev->num_alloc_vport; i++) {
7792                 vport = &hdev->vport[i];
7793                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7794                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7795                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7796                 state = vport->port_base_vlan_cfg.state;
7797
7798                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7799                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7800                                                  vport->vport_id, vlan_id, qos,
7801                                                  false);
7802                         continue;
7803                 }
7804
7805                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7806                         if (vlan->hd_tbl_status)
7807                                 hclge_set_vlan_filter_hw(hdev,
7808                                                          htons(ETH_P_8021Q),
7809                                                          vport->vport_id,
7810                                                          vlan->vlan_id, 0,
7811                                                          false);
7812                 }
7813         }
7814
7815         mutex_unlock(&hdev->vport_cfg_mutex);
7816 }
7817
7818 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7819 {
7820         struct hclge_vport *vport = hclge_get_vport(handle);
7821
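             /* without a port based vlan the user setting controls tag2
              * stripping; with one, tag2 is always stripped and the user
              * setting controls tag1
              */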
7822         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7823                 vport->rxvlan_cfg.strip_tag1_en = false;
7824                 vport->rxvlan_cfg.strip_tag2_en = enable;
7825         } else {
7826                 vport->rxvlan_cfg.strip_tag1_en = enable;
7827                 vport->rxvlan_cfg.strip_tag2_en = true;
7828         }
7829         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7830         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7831         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7832
7833         return hclge_set_vlan_rx_offload_cfg(vport);
7834 }
7835
7836 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7837                                             u16 port_base_vlan_state,
7838                                             struct hclge_vlan_info *new_info,
7839                                             struct hclge_vlan_info *old_info)
7840 {
7841         struct hclge_dev *hdev = vport->back;
7842         int ret;
7843
7844         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7845                 hclge_rm_vport_all_vlan_table(vport, false);
7846                 return hclge_set_vlan_filter_hw(hdev,
7847                                                  htons(new_info->vlan_proto),
7848                                                  vport->vport_id,
7849                                                  new_info->vlan_tag,
7850                                                  new_info->qos, false);
7851         }
7852
7853         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7854                                        vport->vport_id, old_info->vlan_tag,
7855                                        old_info->qos, true);
7856         if (ret)
7857                 return ret;
7858
7859         return hclge_add_vport_all_vlan_table(vport);
7860 }
7861
7862 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7863                                     struct hclge_vlan_info *vlan_info)
7864 {
7865         struct hnae3_handle *nic = &vport->nic;
7866         struct hclge_vlan_info *old_vlan_info;
7867         struct hclge_dev *hdev = vport->back;
7868         int ret;
7869
7870         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7871
7872         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7873         if (ret)
7874                 return ret;
7875
7876         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7877                 /* add new VLAN tag */
7878                 ret = hclge_set_vlan_filter_hw(hdev,
7879                                                htons(vlan_info->vlan_proto),
7880                                                vport->vport_id,
7881                                                vlan_info->vlan_tag,
7882                                                vlan_info->qos, false);
7883                 if (ret)
7884                         return ret;
7885
7886                 /* remove old VLAN tag */
7887                 ret = hclge_set_vlan_filter_hw(hdev,
7888                                                htons(old_vlan_info->vlan_proto),
7889                                                vport->vport_id,
7890                                                old_vlan_info->vlan_tag,
7891                                                old_vlan_info->qos, true);
7892                 if (ret)
7893                         return ret;
7894
7895                 goto update;
7896         }
7897
7898         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7899                                                old_vlan_info);
7900         if (ret)
7901                 return ret;
7902
7903         /* update state only when disable/enable port based VLAN */
7904         vport->port_base_vlan_cfg.state = state;
7905         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7906                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7907         else
7908                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7909
7910 update:
7911         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7912         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7913         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7914
7915         return 0;
7916 }
7917
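     /* Decide how the port based vlan state should change for a requested
      * vlan: when currently disabled, vlan 0 means NOCHANGE and a non-zero
      * vlan means ENABLE; when currently enabled, vlan 0 means DISABLE, the
      * same vlan means NOCHANGE and a different vlan means MODIFY.
      */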
7918 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7919                                           enum hnae3_port_base_vlan_state state,
7920                                           u16 vlan)
7921 {
7922         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7923                 if (!vlan)
7924                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7925                 else
7926                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7927         } else {
7928                 if (!vlan)
7929                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7930                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7931                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7932                 else
7933                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7934         }
7935 }
7936
7937 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7938                                     u16 vlan, u8 qos, __be16 proto)
7939 {
7940         struct hclge_vport *vport = hclge_get_vport(handle);
7941         struct hclge_dev *hdev = vport->back;
7942         struct hclge_vlan_info vlan_info;
7943         u16 state;
7944         int ret;
7945
7946         if (hdev->pdev->revision == 0x20)
7947                 return -EOPNOTSUPP;
7948
7949         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7950         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7951                 return -EINVAL;
7952         if (proto != htons(ETH_P_8021Q))
7953                 return -EPROTONOSUPPORT;
7954
7955         vport = &hdev->vport[vfid];
7956         state = hclge_get_port_base_vlan_state(vport,
7957                                                vport->port_base_vlan_cfg.state,
7958                                                vlan);
7959         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7960                 return 0;
7961
7962         vlan_info.vlan_tag = vlan;
7963         vlan_info.qos = qos;
7964         vlan_info.vlan_proto = ntohs(proto);
7965
7966         /* update port based VLAN for PF */
7967         if (!vfid) {
7968                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7969                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7970                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7971
7972                 return ret;
7973         }
7974
7975         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7976                 return hclge_update_port_base_vlan_cfg(vport, state,
7977                                                        &vlan_info);
7978         } else {
7979                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7980                                                         (u8)vfid, state,
7981                                                         vlan, qos,
7982                                                         ntohs(proto));
7983                 return ret;
7984         }
7985 }
7986
7987 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7988                           u16 vlan_id, bool is_kill)
7989 {
7990         struct hclge_vport *vport = hclge_get_vport(handle);
7991         struct hclge_dev *hdev = vport->back;
7992         bool writen_to_tbl = false;
7993         int ret = 0;
7994
7995         /* When the device is resetting, the firmware is unable to handle
7996          * the mailbox. Just record the vlan id, and remove it after the
7997          * reset has finished.
7998          */
7999         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8000                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8001                 return -EBUSY;
8002         }
8003
8004         /* When port based vlan is enabled, we use the port based vlan as
8005          * the vlan filter entry. In this case, we don't update the vlan
8006          * filter table when the user adds or removes a vlan; we just update
8007          * the vport vlan list. The vlan ids in the vlan list are not written
8008          * to the vlan filter table until port based vlan is disabled.
8009          */
8010         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8011                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8012                                                vlan_id, 0, is_kill);
8013                 writen_to_tbl = true;
8014         }
8015
8016         if (!ret) {
8017                 if (is_kill)
8018                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8019                 else
8020                         hclge_add_vport_vlan_table(vport, vlan_id,
8021                                                    writen_to_tbl);
8022         } else if (is_kill) {
8023                 /* When removing the hw vlan filter fails, record the vlan
8024                  * id and try to remove it from hw later, to stay consistent
8025                  * with the stack.
8026                  */
8027                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8028         }
8029         return ret;
8030 }
8031
8032 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8033 {
8034 #define HCLGE_MAX_SYNC_COUNT    60
8035
8036         int i, ret, sync_cnt = 0;
8037         u16 vlan_id;
8038
8039         /* start from vport 1 for PF is always alive */
8040         for (i = 0; i < hdev->num_alloc_vport; i++) {
8041                 struct hclge_vport *vport = &hdev->vport[i];
8042
8043                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8044                                          VLAN_N_VID);
8045                 while (vlan_id != VLAN_N_VID) {
8046                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8047                                                        vport->vport_id, vlan_id,
8048                                                        0, true);
8049                         if (ret && ret != -EINVAL)
8050                                 return;
8051
8052                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8053                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8054
8055                         sync_cnt++;
8056                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8057                                 return;
8058
8059                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8060                                                  VLAN_N_VID);
8061                 }
8062         }
8063 }
8064
8065 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8066 {
8067         struct hclge_config_max_frm_size_cmd *req;
8068         struct hclge_desc desc;
8069
8070         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8071
8072         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8073         req->max_frm_size = cpu_to_le16(new_mps);
8074         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8075
8076         return hclge_cmd_send(&hdev->hw, &desc, 1);
8077 }
8078
8079 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8080 {
8081         struct hclge_vport *vport = hclge_get_vport(handle);
8082
8083         return hclge_set_vport_mtu(vport, new_mtu);
8084 }
8085
8086 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8087 {
8088         struct hclge_dev *hdev = vport->back;
8089         int i, max_frm_size, ret;
8090
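             /* worst case frame: mtu + ethernet header + FCS + two vlan tags,
              * e.g. an mtu of 1500 gives 1500 + 14 + 4 + 8 = 1526 bytes
              */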
8091         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8092         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8093             max_frm_size > HCLGE_MAC_MAX_FRAME)
8094                 return -EINVAL;
8095
8096         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8097         mutex_lock(&hdev->vport_lock);
8098         /* VF's mps must fit within hdev->mps */
8099         if (vport->vport_id && max_frm_size > hdev->mps) {
8100                 mutex_unlock(&hdev->vport_lock);
8101                 return -EINVAL;
8102         } else if (vport->vport_id) {
8103                 vport->mps = max_frm_size;
8104                 mutex_unlock(&hdev->vport_lock);
8105                 return 0;
8106         }
8107
8108         /* PF's mps must not be smaller than any VF's mps */
8109         for (i = 1; i < hdev->num_alloc_vport; i++)
8110                 if (max_frm_size < hdev->vport[i].mps) {
8111                         mutex_unlock(&hdev->vport_lock);
8112                         return -EINVAL;
8113                 }
8114
8115         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8116
8117         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8118         if (ret) {
8119                 dev_err(&hdev->pdev->dev,
8120                         "Change mtu fail, ret =%d\n", ret);
8121                 goto out;
8122         }
8123
8124         hdev->mps = max_frm_size;
8125         vport->mps = max_frm_size;
8126
8127         ret = hclge_buffer_alloc(hdev);
8128         if (ret)
8129                 dev_err(&hdev->pdev->dev,
8130                         "Allocate buffer fail, ret =%d\n", ret);
8131
8132 out:
8133         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8134         mutex_unlock(&hdev->vport_lock);
8135         return ret;
8136 }
8137
8138 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8139                                     bool enable)
8140 {
8141         struct hclge_reset_tqp_queue_cmd *req;
8142         struct hclge_desc desc;
8143         int ret;
8144
8145         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8146
8147         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8148         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8149         if (enable)
8150                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8151
8152         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8153         if (ret) {
8154                 dev_err(&hdev->pdev->dev,
8155                         "Send tqp reset cmd error, status =%d\n", ret);
8156                 return ret;
8157         }
8158
8159         return 0;
8160 }
8161
8162 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8163 {
8164         struct hclge_reset_tqp_queue_cmd *req;
8165         struct hclge_desc desc;
8166         int ret;
8167
8168         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8169
8170         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8171         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8172
8173         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8174         if (ret) {
8175                 dev_err(&hdev->pdev->dev,
8176                         "Get reset status error, status =%d\n", ret);
8177                 return ret;
8178         }
8179
8180         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8181 }
8182
8183 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8184 {
8185         struct hnae3_queue *queue;
8186         struct hclge_tqp *tqp;
8187
8188         queue = handle->kinfo.tqp[queue_id];
8189         tqp = container_of(queue, struct hclge_tqp, q);
8190
8191         return tqp->index;
8192 }
8193
8194 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8195 {
8196         struct hclge_vport *vport = hclge_get_vport(handle);
8197         struct hclge_dev *hdev = vport->back;
8198         int reset_try_times = 0;
8199         int reset_status;
8200         u16 queue_gid;
8201         int ret;
8202
8203         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8204
8205         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8206         if (ret) {
8207                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8208                 return ret;
8209         }
8210
8211         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8212         if (ret) {
8213                 dev_err(&hdev->pdev->dev,
8214                         "Send reset tqp cmd fail, ret = %d\n", ret);
8215                 return ret;
8216         }
8217
8218         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8219                 /* Wait for tqp hw reset */
8220                 msleep(20);
8221                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8222                 if (reset_status)
8223                         break;
8224         }
8225
8226         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8227                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8228                 return ret;
8229         }
8230
8231         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8232         if (ret)
8233                 dev_err(&hdev->pdev->dev,
8234                         "Deassert the soft reset fail, ret = %d\n", ret);
8235
8236         return ret;
8237 }
8238
8239 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8240 {
8241         struct hclge_dev *hdev = vport->back;
8242         int reset_try_times = 0;
8243         int reset_status;
8244         u16 queue_gid;
8245         int ret;
8246
8247         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8248
8249         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8250         if (ret) {
8251                 dev_warn(&hdev->pdev->dev,
8252                          "Send reset tqp cmd fail, ret = %d\n", ret);
8253                 return;
8254         }
8255
8256         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8257                 /* Wait for tqp hw reset */
8258                 msleep(20);
8259                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8260                 if (reset_status)
8261                         break;
8262         }
8263
8264         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8265                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8266                 return;
8267         }
8268
8269         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8270         if (ret)
8271                 dev_warn(&hdev->pdev->dev,
8272                          "Deassert the soft reset fail, ret = %d\n", ret);
8273 }
8274
8275 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8276 {
8277         struct hclge_vport *vport = hclge_get_vport(handle);
8278         struct hclge_dev *hdev = vport->back;
8279
8280         return hdev->fw_version;
8281 }
8282
8283 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8284 {
8285         struct phy_device *phydev = hdev->hw.mac.phydev;
8286
8287         if (!phydev)
8288                 return;
8289
8290         phy_set_asym_pause(phydev, rx_en, tx_en);
8291 }
8292
8293 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8294 {
8295         int ret;
8296
8297         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8298                 return 0;
8299
8300         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8301         if (ret)
8302                 dev_err(&hdev->pdev->dev,
8303                         "configure pauseparam error, ret = %d.\n", ret);
8304
8305         return ret;
8306 }
8307
8308 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8309 {
8310         struct phy_device *phydev = hdev->hw.mac.phydev;
8311         u16 remote_advertising = 0;
8312         u16 local_advertising;
8313         u32 rx_pause, tx_pause;
8314         u8 flowctl;
8315
8316         if (!phydev->link || !phydev->autoneg)
8317                 return 0;
8318
8319         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8320
8321         if (phydev->pause)
8322                 remote_advertising = LPA_PAUSE_CAP;
8323
8324         if (phydev->asym_pause)
8325                 remote_advertising |= LPA_PAUSE_ASYM;
8326
8327         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8328                                            remote_advertising);
8329         tx_pause = flowctl & FLOW_CTRL_TX;
8330         rx_pause = flowctl & FLOW_CTRL_RX;
8331
8332         if (phydev->duplex == HCLGE_MAC_HALF) {
8333                 tx_pause = 0;
8334                 rx_pause = 0;
8335         }
8336
8337         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8338 }
8339
8340 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8341                                  u32 *rx_en, u32 *tx_en)
8342 {
8343         struct hclge_vport *vport = hclge_get_vport(handle);
8344         struct hclge_dev *hdev = vport->back;
8345         struct phy_device *phydev = hdev->hw.mac.phydev;
8346
8347         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8348
8349         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8350                 *rx_en = 0;
8351                 *tx_en = 0;
8352                 return;
8353         }
8354
8355         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8356                 *rx_en = 1;
8357                 *tx_en = 0;
8358         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8359                 *tx_en = 1;
8360                 *rx_en = 0;
8361         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8362                 *rx_en = 1;
8363                 *tx_en = 1;
8364         } else {
8365                 *rx_en = 0;
8366                 *tx_en = 0;
8367         }
8368 }
8369
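/* Remember the user-requested pause setting as a flow control mode:
 * rx + tx -> FULL, rx only -> RX_PAUSE, tx only -> TX_PAUSE, none -> NONE.
 */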
8370 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8371                                          u32 rx_en, u32 tx_en)
8372 {
8373         if (rx_en && tx_en)
8374                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8375         else if (rx_en && !tx_en)
8376                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8377         else if (!rx_en && tx_en)
8378                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8379         else
8380                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8381
8382         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8383 }
8384
8385 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8386                                 u32 rx_en, u32 tx_en)
8387 {
8388         struct hclge_vport *vport = hclge_get_vport(handle);
8389         struct hclge_dev *hdev = vport->back;
8390         struct phy_device *phydev = hdev->hw.mac.phydev;
8391         u32 fc_autoneg;
8392
8393         if (phydev) {
8394                 fc_autoneg = hclge_get_autoneg(handle);
8395                 if (auto_neg != fc_autoneg) {
8396                         dev_info(&hdev->pdev->dev,
8397                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8398                         return -EOPNOTSUPP;
8399                 }
8400         }
8401
8402         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8403                 dev_info(&hdev->pdev->dev,
8404                          "Priority flow control enabled. Cannot set link flow control.\n");
8405                 return -EOPNOTSUPP;
8406         }
8407
8408         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8409
8410         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8411
8412         if (!auto_neg)
8413                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8414
8415         if (phydev)
8416                 return phy_start_aneg(phydev);
8417
8418         return -EOPNOTSUPP;
8419 }
8420
8421 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8422                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8423 {
8424         struct hclge_vport *vport = hclge_get_vport(handle);
8425         struct hclge_dev *hdev = vport->back;
8426
8427         if (speed)
8428                 *speed = hdev->hw.mac.speed;
8429         if (duplex)
8430                 *duplex = hdev->hw.mac.duplex;
8431         if (auto_neg)
8432                 *auto_neg = hdev->hw.mac.autoneg;
8433 }
8434
8435 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8436                                  u8 *module_type)
8437 {
8438         struct hclge_vport *vport = hclge_get_vport(handle);
8439         struct hclge_dev *hdev = vport->back;
8440
8441         if (media_type)
8442                 *media_type = hdev->hw.mac.media_type;
8443
8444         if (module_type)
8445                 *module_type = hdev->hw.mac.module_type;
8446 }
8447
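/* Report the MDI/MDI-X control and status of the copper PHY by reading its
 * MDIX page registers and translating them to the ethtool ETH_TP_MDI*
 * encoding.
 */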
8448 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8449                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8450 {
8451         struct hclge_vport *vport = hclge_get_vport(handle);
8452         struct hclge_dev *hdev = vport->back;
8453         struct phy_device *phydev = hdev->hw.mac.phydev;
8454         int mdix_ctrl, mdix, is_resolved;
8455         unsigned int retval;
8456
8457         if (!phydev) {
8458                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8459                 *tp_mdix = ETH_TP_MDI_INVALID;
8460                 return;
8461         }
8462
8463         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8464
8465         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8466         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8467                                     HCLGE_PHY_MDIX_CTRL_S);
8468
8469         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8470         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8471         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8472
8473         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8474
8475         switch (mdix_ctrl) {
8476         case 0x0:
8477                 *tp_mdix_ctrl = ETH_TP_MDI;
8478                 break;
8479         case 0x1:
8480                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8481                 break;
8482         case 0x3:
8483                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8484                 break;
8485         default:
8486                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8487                 break;
8488         }
8489
8490         if (!is_resolved)
8491                 *tp_mdix = ETH_TP_MDI_INVALID;
8492         else if (mdix)
8493                 *tp_mdix = ETH_TP_MDI_X;
8494         else
8495                 *tp_mdix = ETH_TP_MDI;
8496 }
8497
8498 static void hclge_info_show(struct hclge_dev *hdev)
8499 {
8500         struct device *dev = &hdev->pdev->dev;
8501
8502         dev_info(dev, "PF info begin:\n");
8503
8504         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8505         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8506         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8507         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8508         dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8509         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8510         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8511         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8512         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8513         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8514         dev_info(dev, "This is %s PF\n",
8515                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8516         dev_info(dev, "DCB %s\n",
8517                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8518         dev_info(dev, "MQPRIO %s\n",
8519                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8520
8521         dev_info(dev, "PF info end.\n");
8522 }
8523
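/* Initialize the NIC client instance for a vport. If a reset starts while
 * the client is being initialized (detected via the reset counter or the
 * RST_HANDLING state bit), the instance is rolled back and -EBUSY is
 * returned.
 */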
8524 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8525                                           struct hclge_vport *vport)
8526 {
8527         struct hnae3_client *client = vport->nic.client;
8528         struct hclge_dev *hdev = ae_dev->priv;
8529         int rst_cnt;
8530         int ret;
8531
8532         rst_cnt = hdev->rst_stats.reset_cnt;
8533         ret = client->ops->init_instance(&vport->nic);
8534         if (ret)
8535                 return ret;
8536
8537         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8538         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8539             rst_cnt != hdev->rst_stats.reset_cnt) {
8540                 ret = -EBUSY;
8541                 goto init_nic_err;
8542         }
8543
8544         /* Enable nic hw error interrupts */
8545         ret = hclge_config_nic_hw_error(hdev, true);
8546         if (ret) {
8547                 dev_err(&ae_dev->pdev->dev,
8548                         "fail(%d) to enable hw error interrupts\n", ret);
8549                 goto init_nic_err;
8550         }
8551
8552         hnae3_set_client_init_flag(client, ae_dev, 1);
8553
8554         if (netif_msg_drv(&hdev->vport->nic))
8555                 hclge_info_show(hdev);
8556
8557         return ret;
8558
8559 init_nic_err:
8560         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8561         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8562                 msleep(HCLGE_WAIT_RESET_DONE);
8563
8564         client->ops->uninit_instance(&vport->nic, 0);
8565
8566         return ret;
8567 }
8568
8569 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8570                                            struct hclge_vport *vport)
8571 {
8572         struct hnae3_client *client = vport->roce.client;
8573         struct hclge_dev *hdev = ae_dev->priv;
8574         int rst_cnt;
8575         int ret;
8576
8577         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8578             !hdev->nic_client)
8579                 return 0;
8580
8581         client = hdev->roce_client;
8582         ret = hclge_init_roce_base_info(vport);
8583         if (ret)
8584                 return ret;
8585
8586         rst_cnt = hdev->rst_stats.reset_cnt;
8587         ret = client->ops->init_instance(&vport->roce);
8588         if (ret)
8589                 return ret;
8590
8591         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8592         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8593             rst_cnt != hdev->rst_stats.reset_cnt) {
8594                 ret = -EBUSY;
8595                 goto init_roce_err;
8596         }
8597
8598         /* Enable roce ras interrupts */
8599         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8600         if (ret) {
8601                 dev_err(&ae_dev->pdev->dev,
8602                         "fail(%d) to enable roce ras interrupts\n", ret);
8603                 goto init_roce_err;
8604         }
8605
8606         hnae3_set_client_init_flag(client, ae_dev, 1);
8607
8608         return 0;
8609
8610 init_roce_err:
8611         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8612         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8613                 msleep(HCLGE_WAIT_RESET_DONE);
8614
8615         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8616
8617         return ret;
8618 }
8619
8620 static int hclge_init_client_instance(struct hnae3_client *client,
8621                                       struct hnae3_ae_dev *ae_dev)
8622 {
8623         struct hclge_dev *hdev = ae_dev->priv;
8624         struct hclge_vport *vport;
8625         int i, ret;
8626
8627         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8628                 vport = &hdev->vport[i];
8629
8630                 switch (client->type) {
8631                 case HNAE3_CLIENT_KNIC:
8632
8633                         hdev->nic_client = client;
8634                         vport->nic.client = client;
8635                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8636                         if (ret)
8637                                 goto clear_nic;
8638
8639                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8640                         if (ret)
8641                                 goto clear_roce;
8642
8643                         break;
8644                 case HNAE3_CLIENT_ROCE:
8645                         if (hnae3_dev_roce_supported(hdev)) {
8646                                 hdev->roce_client = client;
8647                                 vport->roce.client = client;
8648                         }
8649
8650                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8651                         if (ret)
8652                                 goto clear_roce;
8653
8654                         break;
8655                 default:
8656                         return -EINVAL;
8657                 }
8658         }
8659
8660         return 0;
8661
8662 clear_nic:
8663         hdev->nic_client = NULL;
8664         vport->nic.client = NULL;
8665         return ret;
8666 clear_roce:
8667         hdev->roce_client = NULL;
8668         vport->roce.client = NULL;
8669         return ret;
8670 }
8671
8672 static void hclge_uninit_client_instance(struct hnae3_client *client,
8673                                          struct hnae3_ae_dev *ae_dev)
8674 {
8675         struct hclge_dev *hdev = ae_dev->priv;
8676         struct hclge_vport *vport;
8677         int i;
8678
8679         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8680                 vport = &hdev->vport[i];
8681                 if (hdev->roce_client) {
8682                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8683                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8684                                 msleep(HCLGE_WAIT_RESET_DONE);
8685
8686                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8687                                                                 0);
8688                         hdev->roce_client = NULL;
8689                         vport->roce.client = NULL;
8690                 }
8691                 if (client->type == HNAE3_CLIENT_ROCE)
8692                         return;
8693                 if (hdev->nic_client && client->ops->uninit_instance) {
8694                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8695                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8696                                 msleep(HCLGE_WAIT_RESET_DONE);
8697
8698                         client->ops->uninit_instance(&vport->nic, 0);
8699                         hdev->nic_client = NULL;
8700                         vport->nic.client = NULL;
8701                 }
8702         }
8703 }
8704
8705 static int hclge_pci_init(struct hclge_dev *hdev)
8706 {
8707         struct pci_dev *pdev = hdev->pdev;
8708         struct hclge_hw *hw;
8709         int ret;
8710
8711         ret = pci_enable_device(pdev);
8712         if (ret) {
8713                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8714                 return ret;
8715         }
8716
8717         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8718         if (ret) {
8719                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8720                 if (ret) {
8721                         dev_err(&pdev->dev,
8722                                 "can't set consistent PCI DMA\n");
8723                         goto err_disable_device;
8724                 }
8725                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8726         }
8727
8728         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8729         if (ret) {
8730                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8731                 goto err_disable_device;
8732         }
8733
8734         pci_set_master(pdev);
8735         hw = &hdev->hw;
8736         hw->io_base = pcim_iomap(pdev, 2, 0);
8737         if (!hw->io_base) {
8738                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8739                 ret = -ENOMEM;
8740                 goto err_clr_master;
8741         }
8742
8743         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8744
8745         return 0;
8746 err_clr_master:
8747         pci_clear_master(pdev);
8748         pci_release_regions(pdev);
8749 err_disable_device:
8750         pci_disable_device(pdev);
8751
8752         return ret;
8753 }
8754
8755 static void hclge_pci_uninit(struct hclge_dev *hdev)
8756 {
8757         struct pci_dev *pdev = hdev->pdev;
8758
8759         pcim_iounmap(pdev, hdev->hw.io_base);
8760         pci_free_irq_vectors(pdev);
8761         pci_clear_master(pdev);
8762         pci_release_mem_regions(pdev);
8763         pci_disable_device(pdev);
8764 }
8765
8766 static void hclge_state_init(struct hclge_dev *hdev)
8767 {
8768         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8769         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8770         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8771         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8772         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8773         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8774 }
8775
8776 static void hclge_state_uninit(struct hclge_dev *hdev)
8777 {
8778         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8779         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8780
8781         if (hdev->reset_timer.function)
8782                 del_timer_sync(&hdev->reset_timer);
8783         if (hdev->service_task.work.func)
8784                 cancel_delayed_work_sync(&hdev->service_task);
8785         if (hdev->rst_service_task.func)
8786                 cancel_work_sync(&hdev->rst_service_task);
8787         if (hdev->mbx_service_task.func)
8788                 cancel_work_sync(&hdev->mbx_service_task);
8789 }
8790
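/* Prepare for a PCIe function level reset: request an FLR-type reset and
 * wait (up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds) for the
 * reset path to bring the function down.
 */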
8791 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8792 {
8793 #define HCLGE_FLR_WAIT_MS       100
8794 #define HCLGE_FLR_WAIT_CNT      50
8795         struct hclge_dev *hdev = ae_dev->priv;
8796         int cnt = 0;
8797
8798         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8799         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8800         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8801         hclge_reset_event(hdev->pdev, NULL);
8802
8803         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8804                cnt++ < HCLGE_FLR_WAIT_CNT)
8805                 msleep(HCLGE_FLR_WAIT_MS);
8806
8807         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8808                 dev_err(&hdev->pdev->dev,
8809                         "flr wait down timeout: %d\n", cnt);
8810 }
8811
8812 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8813 {
8814         struct hclge_dev *hdev = ae_dev->priv;
8815
8816         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8817 }
8818
8819 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8820 {
8821         u16 i;
8822
8823         for (i = 0; i < hdev->num_alloc_vport; i++) {
8824                 struct hclge_vport *vport = &hdev->vport[i];
8825                 int ret;
8826
8827                 /* Send cmd to clear VF's FUNC_RST_ING */
8828                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8829                 if (ret)
8830                         dev_warn(&hdev->pdev->dev,
8831                                  "clear vf(%d) rst failed %d!\n",
8832                                  vport->vport_id, ret);
8833         }
8834 }
8835
8836 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8837 {
8838         struct pci_dev *pdev = ae_dev->pdev;
8839         struct hclge_dev *hdev;
8840         int ret;
8841
8842         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8843         if (!hdev) {
8844                 ret = -ENOMEM;
8845                 goto out;
8846         }
8847
8848         hdev->pdev = pdev;
8849         hdev->ae_dev = ae_dev;
8850         hdev->reset_type = HNAE3_NONE_RESET;
8851         hdev->reset_level = HNAE3_FUNC_RESET;
8852         ae_dev->priv = hdev;
8853         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8854
8855         mutex_init(&hdev->vport_lock);
8856         mutex_init(&hdev->vport_cfg_mutex);
8857         spin_lock_init(&hdev->fd_rule_lock);
8858
8859         ret = hclge_pci_init(hdev);
8860         if (ret) {
8861                 dev_err(&pdev->dev, "PCI init failed\n");
8862                 goto out;
8863         }
8864
8865         /* Firmware command queue initialize */
8866         ret = hclge_cmd_queue_init(hdev);
8867         if (ret) {
8868                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8869                 goto err_pci_uninit;
8870         }
8871
8872         /* Firmware command initialize */
8873         ret = hclge_cmd_init(hdev);
8874         if (ret)
8875                 goto err_cmd_uninit;
8876
8877         ret = hclge_get_cap(hdev);
8878         if (ret) {
8879                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8880                         ret);
8881                 goto err_cmd_uninit;
8882         }
8883
8884         ret = hclge_configure(hdev);
8885         if (ret) {
8886                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8887                 goto err_cmd_uninit;
8888         }
8889
8890         ret = hclge_init_msi(hdev);
8891         if (ret) {
8892                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8893                 goto err_cmd_uninit;
8894         }
8895
8896         ret = hclge_misc_irq_init(hdev);
8897         if (ret) {
8898                 dev_err(&pdev->dev,
8899                         "Misc IRQ(vector0) init error, ret = %d.\n",
8900                         ret);
8901                 goto err_msi_uninit;
8902         }
8903
8904         ret = hclge_alloc_tqps(hdev);
8905         if (ret) {
8906                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8907                 goto err_msi_irq_uninit;
8908         }
8909
8910         ret = hclge_alloc_vport(hdev);
8911         if (ret) {
8912                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8913                 goto err_msi_irq_uninit;
8914         }
8915
8916         ret = hclge_map_tqp(hdev);
8917         if (ret) {
8918                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8919                 goto err_msi_irq_uninit;
8920         }
8921
8922         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8923                 ret = hclge_mac_mdio_config(hdev);
8924                 if (ret) {
8925                         dev_err(&hdev->pdev->dev,
8926                                 "mdio config fail ret=%d\n", ret);
8927                         goto err_msi_irq_uninit;
8928                 }
8929         }
8930
8931         ret = hclge_init_umv_space(hdev);
8932         if (ret) {
8933                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8934                 goto err_mdiobus_unreg;
8935         }
8936
8937         ret = hclge_mac_init(hdev);
8938         if (ret) {
8939                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8940                 goto err_mdiobus_unreg;
8941         }
8942
8943         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8944         if (ret) {
8945                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8946                 goto err_mdiobus_unreg;
8947         }
8948
8949         ret = hclge_config_gro(hdev, true);
8950         if (ret)
8951                 goto err_mdiobus_unreg;
8952
8953         ret = hclge_init_vlan_config(hdev);
8954         if (ret) {
8955                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8956                 goto err_mdiobus_unreg;
8957         }
8958
8959         ret = hclge_tm_schd_init(hdev);
8960         if (ret) {
8961                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8962                 goto err_mdiobus_unreg;
8963         }
8964
8965         hclge_rss_init_cfg(hdev);
8966         ret = hclge_rss_init_hw(hdev);
8967         if (ret) {
8968                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8969                 goto err_mdiobus_unreg;
8970         }
8971
8972         ret = init_mgr_tbl(hdev);
8973         if (ret) {
8974                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8975                 goto err_mdiobus_unreg;
8976         }
8977
8978         ret = hclge_init_fd_config(hdev);
8979         if (ret) {
8980                 dev_err(&pdev->dev,
8981                         "fd table init fail, ret=%d\n", ret);
8982                 goto err_mdiobus_unreg;
8983         }
8984
8985         INIT_KFIFO(hdev->mac_tnl_log);
8986
8987         hclge_dcb_ops_set(hdev);
8988
8989         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8990         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
8991         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8992         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8993
8994         /* Set up affinity after the service timer setup because add_timer_on
8995          * is called in the affinity notify handler.
8996          */
8997         hclge_misc_affinity_setup(hdev);
8998
8999         hclge_clear_all_event_cause(hdev);
9000         hclge_clear_resetting_state(hdev);
9001
9002         /* Log and clear the hw errors that have already occurred */
9003         hclge_handle_all_hns_hw_errors(ae_dev);
9004
9005         /* Request a delayed reset for error recovery: an immediate global
9006          * reset on a PF would affect the pending initialization of other PFs.
9007          */
9008         if (ae_dev->hw_err_reset_req) {
9009                 enum hnae3_reset_type reset_level;
9010
9011                 reset_level = hclge_get_reset_level(ae_dev,
9012                                                     &ae_dev->hw_err_reset_req);
9013                 hclge_set_def_reset_request(ae_dev, reset_level);
9014                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9015         }
9016
9017         /* Enable MISC vector(vector0) */
9018         hclge_enable_vector(&hdev->misc_vector, true);
9019
9020         hclge_state_init(hdev);
9021         hdev->last_reset_time = jiffies;
9022
9023         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9024                  HCLGE_DRIVER_NAME);
9025
9026         return 0;
9027
9028 err_mdiobus_unreg:
9029         if (hdev->hw.mac.phydev)
9030                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9031 err_msi_irq_uninit:
9032         hclge_misc_irq_uninit(hdev);
9033 err_msi_uninit:
9034         pci_free_irq_vectors(pdev);
9035 err_cmd_uninit:
9036         hclge_cmd_uninit(hdev);
9037 err_pci_uninit:
9038         pcim_iounmap(pdev, hdev->hw.io_base);
9039         pci_clear_master(pdev);
9040         pci_release_regions(pdev);
9041         pci_disable_device(pdev);
9042 out:
9043         return ret;
9044 }
9045
9046 static void hclge_stats_clear(struct hclge_dev *hdev)
9047 {
9048         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9049 }
9050
9051 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9052 {
9053         struct hclge_vport *vport = hdev->vport;
9054         int i;
9055
9056         for (i = 0; i < hdev->num_alloc_vport; i++) {
9057                 hclge_vport_stop(vport);
9058                 vport++;
9059         }
9060 }
9061
9062 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9063 {
9064         struct hclge_dev *hdev = ae_dev->priv;
9065         struct pci_dev *pdev = ae_dev->pdev;
9066         int ret;
9067
9068         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9069
9070         hclge_stats_clear(hdev);
9071         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9072         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9073
9074         ret = hclge_cmd_init(hdev);
9075         if (ret) {
9076                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9077                 return ret;
9078         }
9079
9080         ret = hclge_map_tqp(hdev);
9081         if (ret) {
9082                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9083                 return ret;
9084         }
9085
9086         hclge_reset_umv_space(hdev);
9087
9088         ret = hclge_mac_init(hdev);
9089         if (ret) {
9090                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9091                 return ret;
9092         }
9093
9094         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9095         if (ret) {
9096                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9097                 return ret;
9098         }
9099
9100         ret = hclge_config_gro(hdev, true);
9101         if (ret)
9102                 return ret;
9103
9104         ret = hclge_init_vlan_config(hdev);
9105         if (ret) {
9106                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9107                 return ret;
9108         }
9109
9110         ret = hclge_tm_init_hw(hdev, true);
9111         if (ret) {
9112                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9113                 return ret;
9114         }
9115
9116         ret = hclge_rss_init_hw(hdev);
9117         if (ret) {
9118                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9119                 return ret;
9120         }
9121
9122         ret = hclge_init_fd_config(hdev);
9123         if (ret) {
9124                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9125                 return ret;
9126         }
9127
9128         /* Re-enable the hw error interrupts because
9129          * the interrupts get disabled on global reset.
9130          */
9131         ret = hclge_config_nic_hw_error(hdev, true);
9132         if (ret) {
9133                 dev_err(&pdev->dev,
9134                         "fail(%d) to re-enable NIC hw error interrupts\n",
9135                         ret);
9136                 return ret;
9137         }
9138
9139         if (hdev->roce_client) {
9140                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9141                 if (ret) {
9142                         dev_err(&pdev->dev,
9143                                 "fail(%d) to re-enable roce ras interrupts\n",
9144                                 ret);
9145                         return ret;
9146                 }
9147         }
9148
9149         hclge_reset_vport_state(hdev);
9150
9151         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9152                  HCLGE_DRIVER_NAME);
9153
9154         return 0;
9155 }
9156
9157 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9158 {
9159         struct hclge_dev *hdev = ae_dev->priv;
9160         struct hclge_mac *mac = &hdev->hw.mac;
9161
9162         hclge_misc_affinity_teardown(hdev);
9163         hclge_state_uninit(hdev);
9164
9165         if (mac->phydev)
9166                 mdiobus_unregister(mac->mdio_bus);
9167
9168         hclge_uninit_umv_space(hdev);
9169
9170         /* Disable MISC vector(vector0) */
9171         hclge_enable_vector(&hdev->misc_vector, false);
9172         synchronize_irq(hdev->misc_vector.vector_irq);
9173
9174         /* Disable all hw interrupts */
9175         hclge_config_mac_tnl_int(hdev, false);
9176         hclge_config_nic_hw_error(hdev, false);
9177         hclge_config_rocee_ras_interrupt(hdev, false);
9178
9179         hclge_cmd_uninit(hdev);
9180         hclge_misc_irq_uninit(hdev);
9181         hclge_pci_uninit(hdev);
9182         mutex_destroy(&hdev->vport_lock);
9183         hclge_uninit_vport_mac_table(hdev);
9184         hclge_uninit_vport_vlan_table(hdev);
9185         mutex_destroy(&hdev->vport_cfg_mutex);
9186         ae_dev->priv = NULL;
9187 }
9188
9189 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9190 {
9191         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9192         struct hclge_vport *vport = hclge_get_vport(handle);
9193         struct hclge_dev *hdev = vport->back;
9194
9195         return min_t(u32, hdev->rss_size_max,
9196                      vport->alloc_tqps / kinfo->num_tc);
9197 }
9198
9199 static void hclge_get_channels(struct hnae3_handle *handle,
9200                                struct ethtool_channels *ch)
9201 {
9202         ch->max_combined = hclge_get_max_channels(handle);
9203         ch->other_count = 1;
9204         ch->max_other = 1;
9205         ch->combined_count = handle->kinfo.rss_size;
9206 }
9207
9208 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9209                                         u16 *alloc_tqps, u16 *max_rss_size)
9210 {
9211         struct hclge_vport *vport = hclge_get_vport(handle);
9212         struct hclge_dev *hdev = vport->back;
9213
9214         *alloc_tqps = vport->alloc_tqps;
9215         *max_rss_size = hdev->rss_size_max;
9216 }
9217
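/* Change the number of queue pairs used by the PF: update the TM vport
 * mapping for the requested size, rebuild the RSS TC mode for the new
 * rss_size, and reinitialize the RSS indirection table unless the user has
 * configured it explicitly (rxfh_configured).
 */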
9218 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9219                               bool rxfh_configured)
9220 {
9221         struct hclge_vport *vport = hclge_get_vport(handle);
9222         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9223         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9224         struct hclge_dev *hdev = vport->back;
9225         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9226         int cur_rss_size = kinfo->rss_size;
9227         int cur_tqps = kinfo->num_tqps;
9228         u16 tc_valid[HCLGE_MAX_TC_NUM];
9229         u16 roundup_size;
9230         u32 *rss_indir;
9231         unsigned int i;
9232         int ret;
9233
9234         kinfo->req_rss_size = new_tqps_num;
9235
9236         ret = hclge_tm_vport_map_update(hdev);
9237         if (ret) {
9238                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9239                 return ret;
9240         }
9241
9242         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9243         roundup_size = ilog2(roundup_size);
9244         /* Set the RSS TC mode according to the new RSS size */
9245         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9246                 tc_valid[i] = 0;
9247
9248                 if (!(hdev->hw_tc_map & BIT(i)))
9249                         continue;
9250
9251                 tc_valid[i] = 1;
9252                 tc_size[i] = roundup_size;
9253                 tc_offset[i] = kinfo->rss_size * i;
9254         }
9255         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9256         if (ret)
9257                 return ret;
9258
9259         /* RSS indirection table has been configured by user */
9260         if (rxfh_configured)
9261                 goto out;
9262
9263         /* Reinitialize the RSS indirection table according to the new RSS size */
9264         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9265         if (!rss_indir)
9266                 return -ENOMEM;
9267
9268         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9269                 rss_indir[i] = i % kinfo->rss_size;
9270
9271         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9272         if (ret)
9273                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9274                         ret);
9275
9276         kfree(rss_indir);
9277
9278 out:
9279         if (!ret)
9280                 dev_info(&hdev->pdev->dev,
9281                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
9282                          cur_rss_size, kinfo->rss_size,
9283                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
9284
9285         return ret;
9286 }
9287
9288 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9289                               u32 *regs_num_64_bit)
9290 {
9291         struct hclge_desc desc;
9292         u32 total_num;
9293         int ret;
9294
9295         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9296         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9297         if (ret) {
9298                 dev_err(&hdev->pdev->dev,
9299                         "Query register number cmd failed, ret = %d.\n", ret);
9300                 return ret;
9301         }
9302
9303         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9304         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9305
9306         total_num = *regs_num_32_bit + *regs_num_64_bit;
9307         if (!total_num)
9308                 return -EINVAL;
9309
9310         return 0;
9311 }
9312
9313 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9314                                  void *data)
9315 {
9316 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9317 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9318
9319         struct hclge_desc *desc;
9320         u32 *reg_val = data;
9321         __le32 *desc_data;
9322         int nodata_num;
9323         int cmd_num;
9324         int i, k, n;
9325         int ret;
9326
9327         if (regs_num == 0)
9328                 return 0;
9329
9330         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9331         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9332                                HCLGE_32_BIT_REG_RTN_DATANUM);
9333         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9334         if (!desc)
9335                 return -ENOMEM;
9336
9337         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9338         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9339         if (ret) {
9340                 dev_err(&hdev->pdev->dev,
9341                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
9342                 kfree(desc);
9343                 return ret;
9344         }
9345
9346         for (i = 0; i < cmd_num; i++) {
9347                 if (i == 0) {
9348                         desc_data = (__le32 *)(&desc[i].data[0]);
9349                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9350                 } else {
9351                         desc_data = (__le32 *)(&desc[i]);
9352                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
9353                 }
9354                 for (k = 0; k < n; k++) {
9355                         *reg_val++ = le32_to_cpu(*desc_data++);
9356
9357                         regs_num--;
9358                         if (!regs_num)
9359                                 break;
9360                 }
9361         }
9362
9363         kfree(desc);
9364         return 0;
9365 }
9366
9367 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9368                                  void *data)
9369 {
9370 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9371 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9372
9373         struct hclge_desc *desc;
9374         u64 *reg_val = data;
9375         __le64 *desc_data;
9376         int nodata_len;
9377         int cmd_num;
9378         int i, k, n;
9379         int ret;
9380
9381         if (regs_num == 0)
9382                 return 0;
9383
9384         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9385         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9386                                HCLGE_64_BIT_REG_RTN_DATANUM);
9387         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9388         if (!desc)
9389                 return -ENOMEM;
9390
9391         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9392         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9393         if (ret) {
9394                 dev_err(&hdev->pdev->dev,
9395                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
9396                 kfree(desc);
9397                 return ret;
9398         }
9399
9400         for (i = 0; i < cmd_num; i++) {
9401                 if (i == 0) {
9402                         desc_data = (__le64 *)(&desc[i].data[0]);
9403                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9404                 } else {
9405                         desc_data = (__le64 *)(&desc[i]);
9406                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
9407                 }
9408                 for (k = 0; k < n; k++) {
9409                         *reg_val++ = le64_to_cpu(*desc_data++);
9410
9411                         regs_num--;
9412                         if (!regs_num)
9413                                 break;
9414                 }
9415         }
9416
9417         kfree(desc);
9418         return 0;
9419 }
9420
9421 #define MAX_SEPARATE_NUM        4
9422 #define SEPARATOR_VALUE         0xFDFCFBFA
9423 #define REG_NUM_PER_LINE        4
9424 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9425 #define REG_SEPARATOR_LINE      1
9426 #define REG_NUM_REMAIN_MASK     3
9427 #define BD_LIST_MAX_NUM         30
9428
9429 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9430 {
9431         /* prepare 4 commands to query DFX BD number */
9432         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9433         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9434         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9435         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9436         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9437         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9438         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9439
9440         return hclge_cmd_send(&hdev->hw, desc, 4);
9441 }
9442
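/* Query how many buffer descriptors each DFX register block needs. The
 * firmware returns the per-block BD counts spread across four descriptors;
 * hclge_dfx_bd_offset_list gives the entry offset for each block type.
 */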
9443 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9444                                     int *bd_num_list,
9445                                     u32 type_num)
9446 {
9447 #define HCLGE_DFX_REG_BD_NUM    4
9448
9449         u32 entries_per_desc, desc_index, index, offset, i;
9450         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9451         int ret;
9452
9453         ret = hclge_query_bd_num_cmd_send(hdev, desc);
9454         if (ret) {
9455                 dev_err(&hdev->pdev->dev,
9456                         "Get dfx bd num fail, status is %d.\n", ret);
9457                 return ret;
9458         }
9459
9460         entries_per_desc = ARRAY_SIZE(desc[0].data);
9461         for (i = 0; i < type_num; i++) {
9462                 offset = hclge_dfx_bd_offset_list[i];
9463                 index = offset % entries_per_desc;
9464                 desc_index = offset / entries_per_desc;
9465                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9466         }
9467
9468         return ret;
9469 }
9470
9471 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9472                                   struct hclge_desc *desc_src, int bd_num,
9473                                   enum hclge_opcode_type cmd)
9474 {
9475         struct hclge_desc *desc = desc_src;
9476         int i, ret;
9477
9478         hclge_cmd_setup_basic_desc(desc, cmd, true);
9479         for (i = 0; i < bd_num - 1; i++) {
9480                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9481                 desc++;
9482                 hclge_cmd_setup_basic_desc(desc, cmd, true);
9483         }
9484
9485         desc = desc_src;
9486         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9487         if (ret)
9488                 dev_err(&hdev->pdev->dev,
9489                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9490                         cmd, ret);
9491
9492         return ret;
9493 }
9494
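/* Copy register values out of the query descriptors into the dump buffer
 * and append one to four SEPARATOR_VALUE padding words so the block always
 * ends with a separator. Returns the number of u32 words written.
 */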
9495 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9496                                     void *data)
9497 {
9498         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9499         struct hclge_desc *desc = desc_src;
9500         u32 *reg = data;
9501
9502         entries_per_desc = ARRAY_SIZE(desc->data);
9503         reg_num = entries_per_desc * bd_num;
9504         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9505         for (i = 0; i < reg_num; i++) {
9506                 index = i % entries_per_desc;
9507                 desc_index = i / entries_per_desc;
9508                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
9509         }
9510         for (i = 0; i < separator_num; i++)
9511                 *reg++ = SEPARATOR_VALUE;
9512
9513         return reg_num + separator_num;
9514 }
9515
9516 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9517 {
9518         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9519         int data_len_per_desc, data_len, bd_num, i;
9520         int bd_num_list[BD_LIST_MAX_NUM];
9521         int ret;
9522
9523         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9524         if (ret) {
9525                 dev_err(&hdev->pdev->dev,
9526                         "Get dfx reg bd num fail, status is %d.\n", ret);
9527                 return ret;
9528         }
9529
9530         data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9531         *len = 0;
9532         for (i = 0; i < dfx_reg_type_num; i++) {
9533                 bd_num = bd_num_list[i];
9534                 data_len = data_len_per_desc * bd_num;
9535                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9536         }
9537
9538         return ret;
9539 }
9540
9541 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9542 {
9543         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9544         int bd_num, bd_num_max, buf_len, i;
9545         int bd_num_list[BD_LIST_MAX_NUM];
9546         struct hclge_desc *desc_src;
9547         u32 *reg = data;
9548         int ret;
9549
9550         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9551         if (ret) {
9552                 dev_err(&hdev->pdev->dev,
9553                         "Get dfx reg bd num fail, status is %d.\n", ret);
9554                 return ret;
9555         }
9556
9557         bd_num_max = bd_num_list[0];
9558         for (i = 1; i < dfx_reg_type_num; i++)
9559                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9560
9561         buf_len = sizeof(*desc_src) * bd_num_max;
9562         desc_src = kzalloc(buf_len, GFP_KERNEL);
9563         if (!desc_src) {
9564                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9565                 return -ENOMEM;
9566         }
9567
9568         for (i = 0; i < dfx_reg_type_num; i++) {
9569                 bd_num = bd_num_list[i];
9570                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9571                                              hclge_dfx_reg_opcode_list[i]);
9572                 if (ret) {
9573                         dev_err(&hdev->pdev->dev,
9574                                 "Get dfx reg fail, status is %d.\n", ret);
9575                         break;
9576                 }
9577
9578                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9579         }
9580
9581         kfree(desc_src);
9582         return ret;
9583 }
9584
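/* Dump the per-PF registers read directly from PCIe register space: command
 * queue, common, per-ring and per-TQP-vector interrupt registers, each group
 * padded with SEPARATOR_VALUE words. Returns the number of u32 words
 * written.
 */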
9585 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9586                               struct hnae3_knic_private_info *kinfo)
9587 {
9588 #define HCLGE_RING_REG_OFFSET           0x200
9589 #define HCLGE_RING_INT_REG_OFFSET       0x4
9590
9591         int i, j, reg_num, separator_num;
9592         int data_num_sum;
9593         u32 *reg = data;
9594
9595         /* fetching per-PF register values from PF PCIe register space */
9596         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9597         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9598         for (i = 0; i < reg_num; i++)
9599                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9600         for (i = 0; i < separator_num; i++)
9601                 *reg++ = SEPARATOR_VALUE;
9602         data_num_sum = reg_num + separator_num;
9603
9604         reg_num = ARRAY_SIZE(common_reg_addr_list);
9605         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9606         for (i = 0; i < reg_num; i++)
9607                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9608         for (i = 0; i < separator_num; i++)
9609                 *reg++ = SEPARATOR_VALUE;
9610         data_num_sum += reg_num + separator_num;
9611
9612         reg_num = ARRAY_SIZE(ring_reg_addr_list);
9613         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9614         for (j = 0; j < kinfo->num_tqps; j++) {
9615                 for (i = 0; i < reg_num; i++)
9616                         *reg++ = hclge_read_dev(&hdev->hw,
9617                                                 ring_reg_addr_list[i] +
9618                                                 HCLGE_RING_REG_OFFSET * j);
9619                 for (i = 0; i < separator_num; i++)
9620                         *reg++ = SEPARATOR_VALUE;
9621         }
9622         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9623
9624         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9625         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9626         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9627                 for (i = 0; i < reg_num; i++)
9628                         *reg++ = hclge_read_dev(&hdev->hw,
9629                                                 tqp_intr_reg_addr_list[i] +
9630                                                 HCLGE_RING_INT_REG_OFFSET * j);
9631                 for (i = 0; i < separator_num; i++)
9632                         *reg++ = SEPARATOR_VALUE;
9633         }
9634         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9635
9636         return data_num_sum;
9637 }
9638
9639 static int hclge_get_regs_len(struct hnae3_handle *handle)
9640 {
9641         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9642         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9643         struct hclge_vport *vport = hclge_get_vport(handle);
9644         struct hclge_dev *hdev = vport->back;
9645         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9646         int regs_lines_32_bit, regs_lines_64_bit;
9647         int ret;
9648
9649         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9650         if (ret) {
9651                 dev_err(&hdev->pdev->dev,
9652                         "Get register number failed, ret = %d.\n", ret);
9653                 return ret;
9654         }
9655
9656         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9657         if (ret) {
9658                 dev_err(&hdev->pdev->dev,
9659                         "Get dfx reg len failed, ret = %d.\n", ret);
9660                 return ret;
9661         }
9662
9663         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9664                 REG_SEPARATOR_LINE;
9665         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9666                 REG_SEPARATOR_LINE;
9667         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9668                 REG_SEPARATOR_LINE;
9669         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9670                 REG_SEPARATOR_LINE;
9671         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9672                 REG_SEPARATOR_LINE;
9673         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9674                 REG_SEPARATOR_LINE;
9675
9676         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9677                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9678                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9679 }
9680
9681 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9682                            void *data)
9683 {
9684         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9685         struct hclge_vport *vport = hclge_get_vport(handle);
9686         struct hclge_dev *hdev = vport->back;
9687         u32 regs_num_32_bit, regs_num_64_bit;
9688         int i, reg_num, separator_num, ret;
9689         u32 *reg = data;
9690
9691         *version = hdev->fw_version;
9692
9693         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9694         if (ret) {
9695                 dev_err(&hdev->pdev->dev,
9696                         "Get register number failed, ret = %d.\n", ret);
9697                 return;
9698         }
9699
9700         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
9701
9702         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9703         if (ret) {
9704                 dev_err(&hdev->pdev->dev,
9705                         "Get 32 bit register failed, ret = %d.\n", ret);
9706                 return;
9707         }
9708         reg_num = regs_num_32_bit;
9709         reg += reg_num;
9710         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9711         for (i = 0; i < separator_num; i++)
9712                 *reg++ = SEPARATOR_VALUE;
9713
9714         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9715         if (ret) {
9716                 dev_err(&hdev->pdev->dev,
9717                         "Get 64 bit register failed, ret = %d.\n", ret);
9718                 return;
9719         }
9720         reg_num = regs_num_64_bit * 2;
9721         reg += reg_num;
9722         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9723         for (i = 0; i < separator_num; i++)
9724                 *reg++ = SEPARATOR_VALUE;
9725
9726         ret = hclge_get_dfx_reg(hdev, reg);
9727         if (ret)
9728                 dev_err(&hdev->pdev->dev,
9729                         "Get dfx register failed, ret = %d.\n", ret);
9730 }
9731
9732 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9733 {
9734         struct hclge_set_led_state_cmd *req;
9735         struct hclge_desc desc;
9736         int ret;
9737
9738         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9739
9740         req = (struct hclge_set_led_state_cmd *)desc.data;
9741         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9742                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9743
9744         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9745         if (ret)
9746                 dev_err(&hdev->pdev->dev,
9747                         "Send set led state cmd error, ret =%d\n", ret);
9748
9749         return ret;
9750 }
9751
9752 enum hclge_led_status {
9753         HCLGE_LED_OFF,
9754         HCLGE_LED_ON,
9755         HCLGE_LED_NO_CHANGE = 0xFF,
9756 };
9757
9758 static int hclge_set_led_id(struct hnae3_handle *handle,
9759                             enum ethtool_phys_id_state status)
9760 {
9761         struct hclge_vport *vport = hclge_get_vport(handle);
9762         struct hclge_dev *hdev = vport->back;
9763
9764         switch (status) {
9765         case ETHTOOL_ID_ACTIVE:
9766                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9767         case ETHTOOL_ID_INACTIVE:
9768                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9769         default:
9770                 return -EINVAL;
9771         }
9772 }
9773
9774 static void hclge_get_link_mode(struct hnae3_handle *handle,
9775                                 unsigned long *supported,
9776                                 unsigned long *advertising)
9777 {
9778         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9779         struct hclge_vport *vport = hclge_get_vport(handle);
9780         struct hclge_dev *hdev = vport->back;
9781         unsigned int idx = 0;
9782
9783         for (; idx < size; idx++) {
9784                 supported[idx] = hdev->hw.mac.supported[idx];
9785                 advertising[idx] = hdev->hw.mac.advertising[idx];
9786         }
9787 }
9788
9789 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9790 {
9791         struct hclge_vport *vport = hclge_get_vport(handle);
9792         struct hclge_dev *hdev = vport->back;
9793
9794         return hclge_config_gro(hdev, enable);
9795 }
9796
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};

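/* Algorithm instance registered with the HNAE3 framework; devices are
 * matched against ae_algo_pci_tbl.
 */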
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

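/* Module entry points: register and unregister the hclge algorithm
 * with the HNAE3 framework.
 */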
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);