// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME                      "hclge"
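/* HCLGE_STATS_READ dereferences a u64 counter at a byte offset within a
 * stats structure; HCLGE_MAC_STATS_FIELD_OFF yields that offset for a
 * named member of struct hclge_mac_stats. The g_mac_stats_string table
 * below uses them to map ethtool stat names onto structure members.
 */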
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT     256U
#define HCLGE_BUF_MUL_BY        2
#define HCLGE_BUF_DIV_BY        2
#define NEED_RESERVE_TC_NUM     2
#define BUF_MAX_PERCENT         100
#define BUF_RESERVE_PERCENT     90

#define HCLGE_RESET_MAX_FAIL_CNT        5
#define HCLGE_RESET_SYNC_TIME           100
#define HCLGE_PF_RESET_SYNC_TIME        20
#define HCLGE_PF_RESET_SYNC_CNT         1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

#define HCLGE_LINK_STATUS_MS    10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
                                         HCLGE_CMDQ_TX_ADDR_H_REG,
                                         HCLGE_CMDQ_TX_DEPTH_REG,
                                         HCLGE_CMDQ_TX_TAIL_REG,
                                         HCLGE_CMDQ_TX_HEAD_REG,
                                         HCLGE_CMDQ_RX_ADDR_L_REG,
                                         HCLGE_CMDQ_RX_ADDR_H_REG,
                                         HCLGE_CMDQ_RX_DEPTH_REG,
                                         HCLGE_CMDQ_RX_TAIL_REG,
                                         HCLGE_CMDQ_RX_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_VECTOR0_OTER_EN_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

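/* Static entry for the MAC manager table: it matches LLDP frames
 * (EtherType ETH_P_LLDP) addressed to the 01:80:c2:00:00:0e multicast
 * address, which is encoded below as the high 32 and low 16 bits of the
 * destination MAC.
 */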
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
                .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
                .i_port_bitmap = 0x1,
        },
};

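/* Default RSS hash key: the widely used 40-byte Toeplitz key that also
 * appears as the default in many other NIC drivers.
 */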
static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
        HCLGE_DFX_BIOS_BD_OFFSET,
        HCLGE_DFX_SSU_0_BD_OFFSET,
        HCLGE_DFX_SSU_1_BD_OFFSET,
        HCLGE_DFX_IGU_BD_OFFSET,
        HCLGE_DFX_RPU_0_BD_OFFSET,
        HCLGE_DFX_RPU_1_BD_OFFSET,
        HCLGE_DFX_NCSI_BD_OFFSET,
        HCLGE_DFX_RTC_BD_OFFSET,
        HCLGE_DFX_PPP_BD_OFFSET,
        HCLGE_DFX_RCB_BD_OFFSET,
        HCLGE_DFX_TQP_BD_OFFSET,
        HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
        HCLGE_OPC_DFX_BIOS_COMMON_REG,
        HCLGE_OPC_DFX_SSU_REG_0,
        HCLGE_OPC_DFX_SSU_REG_1,
        HCLGE_OPC_DFX_IGU_EGU_REG,
        HCLGE_OPC_DFX_RPU_REG_0,
        HCLGE_OPC_DFX_RPU_REG_1,
        HCLGE_OPC_DFX_NCSI_REG,
        HCLGE_OPC_DFX_RTC_REG,
        HCLGE_OPC_DFX_PPP_REG,
        HCLGE_OPC_DFX_RCB_REG,
        HCLGE_OPC_DFX_TQP_REG,
        HCLGE_OPC_DFX_SSU_REG_2
};

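/* Bit widths of the metadata and tuple fields used when composing flow
 * director lookup keys; each entry pairs a field identifier with the
 * number of key bits it occupies.
 */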
static const struct key_info meta_data_key_info[] = {
        { PACKET_TYPE_ID, 6},
        { IP_FRAGEMENT, 1},
        { ROCE_TYPE, 1},
        { NEXT_KEY, 5},
        { VLAN_NUMBER, 2},
        { SRC_VPORT, 12},
        { DST_VPORT, 12},
        { TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
        { OUTER_DST_MAC, 48},
        { OUTER_SRC_MAC, 48},
        { OUTER_VLAN_TAG_FST, 16},
        { OUTER_VLAN_TAG_SEC, 16},
        { OUTER_ETH_TYPE, 16},
        { OUTER_L2_RSV, 16},
        { OUTER_IP_TOS, 8},
        { OUTER_IP_PROTO, 8},
        { OUTER_SRC_IP, 32},
        { OUTER_DST_IP, 32},
        { OUTER_L3_RSV, 16},
        { OUTER_SRC_PORT, 16},
        { OUTER_DST_PORT, 16},
        { OUTER_L4_RSV, 32},
        { OUTER_TUN_VNI, 24},
        { OUTER_TUN_FLOW_ID, 8},
        { INNER_DST_MAC, 48},
        { INNER_SRC_MAC, 48},
        { INNER_VLAN_TAG_FST, 16},
        { INNER_VLAN_TAG_SEC, 16},
        { INNER_ETH_TYPE, 16},
        { INNER_L2_RSV, 16},
        { INNER_IP_TOS, 8},
        { INNER_IP_PROTO, 8},
        { INNER_SRC_IP, 32},
        { INNER_DST_IP, 32},
        { INNER_L3_RSV, 16},
        { INNER_SRC_PORT, 16},
        { INNER_DST_PORT, 16},
        { INNER_L4_RSV, 32},
};

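/* Read the MAC statistics with the legacy fixed-length command: a chain
 * of HCLGE_MAC_CMD_NUM descriptors is sent with HCLGE_OPC_STATS_MAC and
 * the returned little-endian counters are accumulated into
 * hdev->hw_stats.mac_stats in field order.
 */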
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the header */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc *desc;
        __le64 *desc_data;
        u16 i, k, n;
        int ret;

        /* This may be called inside atomic sections,
         * so GFP_ATOMIC is more suitable here
         */
        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
        if (ret) {
                kfree(desc);
                return ret;
        }

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the header */
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        kfree(desc);

        return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
        struct hclge_desc desc;
        __le32 *desc_data;
        u32 reg_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);
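
        /* The computation below is desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4),
         * which matches a layout where the first descriptor carries three u64
         * counters alongside the command header and each following descriptor
         * carries four.
         */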
        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

        return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);

        /* The firmware supports the new statistics acquisition method */
        if (!ret)
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}

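/* Refresh the per-queue counters: for every TQP, query the RX status
 * and then the TX status descriptor, accumulating the returned packet
 * counts into the tqp_stats of each queue.
 */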
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_RX_STATUS */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_TX_STATUS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each TQP has both a TX and an RX queue */
        return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

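/* Copy each counter named in @strs out of @comm_stats into the ethtool
 * data buffer, returning the buffer position after the last entry.
 */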
static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                HNAE3_SUPPORT_PHY_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: only supported in GE mode
         * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
         * phy: only supported when a PHY device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->pdev->revision >= 0x21 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

                if (hdev->hw.mac.phydev) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }

        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
        u8 *p = data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                           size, p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_update_stats(handle, NULL);

        mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
        mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT     5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);
                        return ret;
                }

                /* Check if PF reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        return hclge_parse_func_status(hdev, req);
}

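/* Query the resources firmware has assigned to this PF: the TQP count,
 * the packet/TX/DV buffer sizes and the MSI-X vector budget (including
 * the RoCE vector offset when RoCE is supported).
 */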
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = __le16_to_cpu(req->tqp_num);
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)
                hdev->tx_buf_size =
                        __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)
                hdev->dv_buf_size =
                        __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                /* The PF has both NIC and RoCE vectors;
                 * NIC vectors precede RoCE vectors in the vector table.
                 */
                hdev->num_msi = hdev->num_roce_msi +
                                hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
        }

        return 0;
}

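/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value.
 * Note the codes are not ordered by speed: 0 through 5 cover 1G up to
 * 100G, while 6 and 7 denote 10M and 100M.
 */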
static int hclge_parse_speed(int speed_cmd, int *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;
        u32 speed_bit = 0;

        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
                speed_bit = HCLGE_SUPPORT_10M_BIT;
                break;
        case HCLGE_MAC_SPEED_100M:
                speed_bit = HCLGE_SUPPORT_100M_BIT;
                break;
        case HCLGE_MAC_SPEED_1G:
                speed_bit = HCLGE_SUPPORT_1G_BIT;
                break;
        case HCLGE_MAC_SPEED_10G:
                speed_bit = HCLGE_SUPPORT_10G_BIT;
                break;
        case HCLGE_MAC_SPEED_25G:
                speed_bit = HCLGE_SUPPORT_25G_BIT;
                break;
        case HCLGE_MAC_SPEED_40G:
                speed_bit = HCLGE_SUPPORT_40G_BIT;
                break;
        case HCLGE_MAC_SPEED_50G:
                speed_bit = HCLGE_SUPPORT_50G_BIT;
                break;
        case HCLGE_MAC_SPEED_100G:
                speed_bit = HCLGE_SUPPORT_100G_BIT;
                break;
        default:
                return -EINVAL;
        }

        if (speed_bit & speed_ability)
                return 0;

        return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                                 mac->supported);
}

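/* Advertise FEC modes according to the current MAC speed: 10G/40G links
 * support BASE-R, 25G/50G support both BASE-R and RS, and 100G supports
 * RS only; other speeds advertise no FEC ability.
 */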
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_100G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
                break;
        default:
                mac->fec_ability = 0;
                break;
        }
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                                 mac->supported);

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u8 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        /* default to supporting all speeds for a GE port */
        if (!speed_ability)
                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                 supported);

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                 supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                                 supported);
        }

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
        struct hclge_cfg_param_cmd *req;
        u64 mac_addr_tmp_high;
        u64 mac_addr_tmp;
        unsigned int i;

        req = (struct hclge_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                              HCLGE_CFG_VMDQ_M,
                                              HCLGE_CFG_VMDQ_S);
        cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                            HCLGE_CFG_TQP_DESC_N_M,
                                            HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                        HCLGE_CFG_PHY_ADDR_M,
                                        HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_MEDIA_TP_M,
                                          HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_RX_BUF_LEN_M,
                                          HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_MAC_ADDR_H_M,
                                            HCLGE_CFG_MAC_ADDR_H_S);

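        /* Fold the upper MAC-address bits from param[3] on top of the
         * lower 32 bits from param[2]; the two-step shift below is
         * equivalent to a single "<< 32".
         */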
        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                             HCLGE_CFG_DEFAULT_SPEED_M,
                                             HCLGE_CFG_DEFAULT_SPEED_S);
        cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_RSS_SIZE_M,
                                            HCLGE_CFG_RSS_SIZE_S);

        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);

        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
        cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_UMV_TBL_SPACE_M,
                                         HCLGE_CFG_UMV_TBL_SPACE_S);
        if (!cfg->umv_space)
                cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
        struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
        struct hclge_cfg_param_cmd *req;
        unsigned int i;
        int ret;

        for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
                u32 offset = 0;

                req = (struct hclge_cfg_param_cmd *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                           true);
                hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
                                HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
                /* the length must be in units of 4 bytes when sent to hardware */
                hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
                                HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
                req->offset = cpu_to_le32(offset);
        }

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
                return ret;
        }

        hclge_parse_cfg(hcfg, desc);

        return 0;
}

1279 static int hclge_get_cap(struct hclge_dev *hdev)
1280 {
1281         int ret;
1282
1283         ret = hclge_query_function_status(hdev);
1284         if (ret) {
1285                 dev_err(&hdev->pdev->dev,
1286                         "query function status error %d.\n", ret);
1287                 return ret;
1288         }
1289
1290         /* get pf resource */
1291         ret = hclge_query_pf_resource(hdev);
1292         if (ret)
1293                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1294
1295         return ret;
1296 }
1297
1298 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1299 {
1300 #define HCLGE_MIN_TX_DESC       64
1301 #define HCLGE_MIN_RX_DESC       64
1302
1303         if (!is_kdump_kernel())
1304                 return;
1305
1306         dev_info(&hdev->pdev->dev,
1307                  "Running kdump kernel. Using minimal resources\n");
1308
1309         /* the minimal number of queue pairs equals the number of vports */
1310         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1311         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1312         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1313 }
1314
1315 static int hclge_configure(struct hclge_dev *hdev)
1316 {
1317         struct hclge_cfg cfg;
1318         unsigned int i;
1319         int ret;
1320
1321         ret = hclge_get_cfg(hdev, &cfg);
1322         if (ret) {
1323                 dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
1324                 return ret;
1325         }
1326
1327         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1328         hdev->base_tqp_pid = 0;
1329         hdev->rss_size_max = cfg.rss_size_max;
1330         hdev->rx_buf_len = cfg.rx_buf_len;
1331         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1332         hdev->hw.mac.media_type = cfg.media_type;
1333         hdev->hw.mac.phy_addr = cfg.phy_addr;
1334         hdev->num_tx_desc = cfg.tqp_desc_num;
1335         hdev->num_rx_desc = cfg.tqp_desc_num;
1336         hdev->tm_info.num_pg = 1;
1337         hdev->tc_max = cfg.tc_num;
1338         hdev->tm_info.hw_pfc_map = 0;
1339         hdev->wanted_umv_size = cfg.umv_space;
1340
1341         if (hnae3_dev_fd_supported(hdev)) {
1342                 hdev->fd_en = true;
1343                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1344         }
1345
1346         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1347         if (ret) {
1348                 dev_err(&hdev->pdev->dev, "failed to parse default speed, ret = %d.\n", ret);
1349                 return ret;
1350         }
1351
1352         hclge_parse_link_mode(hdev, cfg.speed_ability);
1353
1354         if (hdev->tc_max > HNAE3_MAX_TC ||
1355             hdev->tc_max < 1) {
1356                 dev_warn(&hdev->pdev->dev, "invalid TC num %d, forcing to 1.\n",
1357                          hdev->tc_max);
1358                 hdev->tc_max = 1;
1359         }
1360
1361         /* Dev does not support DCB */
1362         if (!hnae3_dev_dcb_supported(hdev)) {
1363                 hdev->tc_max = 1;
1364                 hdev->pfc_max = 0;
1365         } else {
1366                 hdev->pfc_max = hdev->tc_max;
1367         }
1368
1369         hdev->tm_info.num_tc = 1;
1370
1371         /* non-contiguous TC configurations are not supported yet */
1372         for (i = 0; i < hdev->tm_info.num_tc; i++)
1373                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1374
1375         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1376
1377         hclge_init_kdump_kernel_config(hdev);
1378
1379         /* set the initial affinity based on the PCI function number */
1380         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1381         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1382         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1383                         &hdev->affinity_mask);
1384
1385         return ret;
1386 }
1387
1388 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1389                             unsigned int tso_mss_max)
1390 {
1391         struct hclge_cfg_tso_status_cmd *req;
1392         struct hclge_desc desc;
1393         u16 tso_mss;
1394
1395         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1396
1397         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1398
1399         tso_mss = 0;
1400         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1401                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1402         req->tso_mss_min = cpu_to_le16(tso_mss);
1403
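        /* note: the MIN mask/shift pair is reused for the max-MSS word below;
         * presumably both 16-bit words share the same in-word field layout
         */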
1404         tso_mss = 0;
1405         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1406                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1407         req->tso_mss_max = cpu_to_le16(tso_mss);
1408
1409         return hclge_cmd_send(&hdev->hw, &desc, 1);
1410 }
1411
1412 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1413 {
1414         struct hclge_cfg_gro_status_cmd *req;
1415         struct hclge_desc desc;
1416         int ret;
1417
1418         if (!hnae3_dev_gro_supported(hdev))
1419                 return 0;
1420
1421         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1422         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1423
1424         req->gro_en = cpu_to_le16(en ? 1 : 0);
1425
1426         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1427         if (ret)
1428                 dev_err(&hdev->pdev->dev,
1429                         "GRO hardware config cmd failed, ret = %d\n", ret);
1430
1431         return ret;
1432 }
1433
1434 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1435 {
1436         struct hclge_tqp *tqp;
1437         int i;
1438
1439         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1440                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1441         if (!hdev->htqp)
1442                 return -ENOMEM;
1443
1444         tqp = hdev->htqp;
1445
1446         for (i = 0; i < hdev->num_tqps; i++) {
1447                 tqp->dev = &hdev->pdev->dev;
1448                 tqp->index = i;
1449
1450                 tqp->q.ae_algo = &ae_algo;
1451                 tqp->q.buf_size = hdev->rx_buf_len;
1452                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1453                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1454                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1455                         i * HCLGE_TQP_REG_SIZE;
1456
1457                 tqp++;
1458         }
1459
1460         return 0;
1461 }
1462
1463 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1464                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1465 {
1466         struct hclge_tqp_map_cmd *req;
1467         struct hclge_desc desc;
1468         int ret;
1469
1470         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1471
1472         req = (struct hclge_tqp_map_cmd *)desc.data;
1473         req->tqp_id = cpu_to_le16(tqp_pid);
1474         req->tqp_vf = func_id;
1475         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1476         if (!is_pf)
1477                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1478         req->tqp_vid = cpu_to_le16(tqp_vid);
1479
1480         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1481         if (ret)
1482                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1483
1484         return ret;
1485 }
1486
1487 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1488 {
1489         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1490         struct hclge_dev *hdev = vport->back;
1491         int i, alloced;
1492
1493         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1494              alloced < num_tqps; i++) {
1495                 if (!hdev->htqp[i].alloced) {
1496                         hdev->htqp[i].q.handle = &vport->nic;
1497                         hdev->htqp[i].q.tqp_index = alloced;
1498                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1499                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1500                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1501                         hdev->htqp[i].alloced = true;
1502                         alloced++;
1503                 }
1504         }
1505         vport->alloc_tqps = alloced;
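        /* e.g. 16 allocated TQPs spread over 4 TCs allow at most 4 RSS
         * queues per TC, further capped by rss_size_max (illustrative
         * figures)
         */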
1506         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1507                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1508
1509         return 0;
1510 }
1511
1512 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1513                             u16 num_tx_desc, u16 num_rx_desc)
1515 {
1516         struct hnae3_handle *nic = &vport->nic;
1517         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1518         struct hclge_dev *hdev = vport->back;
1519         int ret;
1520
1521         kinfo->num_tx_desc = num_tx_desc;
1522         kinfo->num_rx_desc = num_rx_desc;
1523
1524         kinfo->rx_buf_len = hdev->rx_buf_len;
1525
1526         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1527                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1528         if (!kinfo->tqp)
1529                 return -ENOMEM;
1530
1531         ret = hclge_assign_tqp(vport, num_tqps);
1532         if (ret)
1533                 dev_err(&hdev->pdev->dev, "failed to assign TQPs, ret = %d.\n", ret);
1534
1535         return ret;
1536 }
1537
1538 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1539                                   struct hclge_vport *vport)
1540 {
1541         struct hnae3_handle *nic = &vport->nic;
1542         struct hnae3_knic_private_info *kinfo;
1543         u16 i;
1544
1545         kinfo = &nic->kinfo;
1546         for (i = 0; i < vport->alloc_tqps; i++) {
1547                 struct hclge_tqp *q =
1548                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1549                 bool is_pf;
1550                 int ret;
1551
1552                 is_pf = !(vport->vport_id);
1553                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1554                                              i, is_pf);
1555                 if (ret)
1556                         return ret;
1557         }
1558
1559         return 0;
1560 }
1561
1562 static int hclge_map_tqp(struct hclge_dev *hdev)
1563 {
1564         struct hclge_vport *vport = hdev->vport;
1565         u16 i, num_vport;
1566
1567         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1568         for (i = 0; i < num_vport; i++) {
1569                 int ret;
1570
1571                 ret = hclge_map_tqp_to_vport(hdev, vport);
1572                 if (ret)
1573                         return ret;
1574
1575                 vport++;
1576         }
1577
1578         return 0;
1579 }
1580
1581 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1582 {
1583         struct hnae3_handle *nic = &vport->nic;
1584         struct hclge_dev *hdev = vport->back;
1585         int ret;
1586
1587         nic->pdev = hdev->pdev;
1588         nic->ae_algo = &ae_algo;
1589         nic->numa_node_mask = hdev->numa_node_mask;
1590
1591         ret = hclge_knic_setup(vport, num_tqps,
1592                                hdev->num_tx_desc, hdev->num_rx_desc);
1593         if (ret)
1594                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1595
1596         return ret;
1597 }
1598
1599 static int hclge_alloc_vport(struct hclge_dev *hdev)
1600 {
1601         struct pci_dev *pdev = hdev->pdev;
1602         struct hclge_vport *vport;
1603         u32 tqp_main_vport;
1604         u32 tqp_per_vport;
1605         int num_vport, i;
1606         int ret;
1607
1608         /* we need to alloc a vport for the main NIC of the PF */
1609         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1610
1611         if (hdev->num_tqps < num_vport) {
1612                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1613                         hdev->num_tqps, num_vport);
1614                 return -EINVAL;
1615         }
1616
1617         /* Alloc the same number of TQPs for every vport */
1618         tqp_per_vport = hdev->num_tqps / num_vport;
1619         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
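        /* e.g. 64 TQPs across 9 vports: every vport gets 7 and the main
         * vport takes the 1 left over, i.e. 8 (illustrative figures)
         */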
1620
1621         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1622                              GFP_KERNEL);
1623         if (!vport)
1624                 return -ENOMEM;
1625
1626         hdev->vport = vport;
1627         hdev->num_alloc_vport = num_vport;
1628
1629         if (IS_ENABLED(CONFIG_PCI_IOV))
1630                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1631
1632         for (i = 0; i < num_vport; i++) {
1633                 vport->back = hdev;
1634                 vport->vport_id = i;
1635                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1636                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1637                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1638                 INIT_LIST_HEAD(&vport->vlan_list);
1639                 INIT_LIST_HEAD(&vport->uc_mac_list);
1640                 INIT_LIST_HEAD(&vport->mc_mac_list);
1641
1642                 if (i == 0)
1643                         ret = hclge_vport_setup(vport, tqp_main_vport);
1644                 else
1645                         ret = hclge_vport_setup(vport, tqp_per_vport);
1646                 if (ret) {
1647                         dev_err(&pdev->dev,
1648                                 "vport setup failed for vport %d, %d\n",
1649                                 i, ret);
1650                         return ret;
1651                 }
1652
1653                 vport++;
1654         }
1655
1656         return 0;
1657 }
1658
1659 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1660                                     struct hclge_pkt_buf_alloc *buf_alloc)
1661 {
1662 /* TX buffer size is in units of 128 bytes */
1663 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1664 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
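/* e.g. a 32 KB TC buffer is encoded as (32768 >> 7) = 256 with bit 15 set
 * to commit the update (figure for illustration only)
 */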
1665         struct hclge_tx_buff_alloc_cmd *req;
1666         struct hclge_desc desc;
1667         int ret;
1668         u8 i;
1669
1670         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1671
1672         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1673         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1674                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1675
1676                 req->tx_pkt_buff[i] =
1677                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1678                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1679         }
1680
1681         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1682         if (ret)
1683                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1684                         ret);
1685
1686         return ret;
1687 }
1688
1689 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1690                                  struct hclge_pkt_buf_alloc *buf_alloc)
1691 {
1692         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1693
1694         if (ret)
1695                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1696
1697         return ret;
1698 }
1699
1700 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1701 {
1702         unsigned int i;
1703         u32 cnt = 0;
1704
1705         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1706                 if (hdev->hw_tc_map & BIT(i))
1707                         cnt++;
1708         return cnt;
1709 }
1710
1711 /* get the number of PFC-enabled TCs that have a private buffer */
1712 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1713                                   struct hclge_pkt_buf_alloc *buf_alloc)
1714 {
1715         struct hclge_priv_buf *priv;
1716         unsigned int i;
1717         int cnt = 0;
1718
1719         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1720                 priv = &buf_alloc->priv_buf[i];
1721                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1722                     priv->enable)
1723                         cnt++;
1724         }
1725
1726         return cnt;
1727 }
1728
1729 /* get the number of PFC-disabled TCs that have a private buffer */
1730 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1731                                      struct hclge_pkt_buf_alloc *buf_alloc)
1732 {
1733         struct hclge_priv_buf *priv;
1734         unsigned int i;
1735         int cnt = 0;
1736
1737         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1738                 priv = &buf_alloc->priv_buf[i];
1739                 if (hdev->hw_tc_map & BIT(i) &&
1740                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1741                     priv->enable)
1742                         cnt++;
1743         }
1744
1745         return cnt;
1746 }
1747
1748 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1749 {
1750         struct hclge_priv_buf *priv;
1751         u32 rx_priv = 0;
1752         int i;
1753
1754         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1755                 priv = &buf_alloc->priv_buf[i];
1756                 if (priv->enable)
1757                         rx_priv += priv->buf_size;
1758         }
1759         return rx_priv;
1760 }
1761
1762 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1763 {
1764         u32 i, total_tx_size = 0;
1765
1766         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1767                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1768
1769         return total_tx_size;
1770 }
1771
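/* A rough illustration of the check below, assuming a DCB-capable device
 * with mps = 1500, dv_buf_size = 8192 and 4 enabled TCs (figures are for
 * illustration only): aligned_mps = roundup(1500, 256) = 1536,
 * shared_buf_min = 2 * 1536 + 8192 = 11264, shared_buf_tc = 4 * 1536 + 1536
 * = 7680, so shared_std = roundup(max(11264, 7680), 256) = 11264 must fit
 * into rx_all - rx_priv for the layout to be acceptable.
 */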
1772 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1773                                 struct hclge_pkt_buf_alloc *buf_alloc,
1774                                 u32 rx_all)
1775 {
1776         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1777         u32 tc_num = hclge_get_tc_num(hdev);
1778         u32 shared_buf, aligned_mps;
1779         u32 rx_priv;
1780         int i;
1781
1782         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1783
1784         if (hnae3_dev_dcb_supported(hdev))
1785                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1786                                         hdev->dv_buf_size;
1787         else
1788                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1789                                         + hdev->dv_buf_size;
1790
1791         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1792         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1793                              HCLGE_BUF_SIZE_UNIT);
1794
1795         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1796         if (rx_all < rx_priv + shared_std)
1797                 return false;
1798
1799         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1800         buf_alloc->s_buf.buf_size = shared_buf;
1801         if (hnae3_dev_dcb_supported(hdev)) {
1802                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1803                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1804                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1805                                   HCLGE_BUF_SIZE_UNIT);
1806         } else {
1807                 buf_alloc->s_buf.self.high = aligned_mps +
1808                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1809                 buf_alloc->s_buf.self.low = aligned_mps;
1810         }
1811
1812         if (hnae3_dev_dcb_supported(hdev)) {
1813                 hi_thrd = shared_buf - hdev->dv_buf_size;
1814
1815                 if (tc_num <= NEED_RESERVE_TC_NUM)
1816                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1817                                         / BUF_MAX_PERCENT;
1818
1819                 if (tc_num)
1820                         hi_thrd = hi_thrd / tc_num;
1821
1822                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1823                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1824                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1825         } else {
1826                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1827                 lo_thrd = aligned_mps;
1828         }
1829
1830         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1831                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1832                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1833         }
1834
1835         return true;
1836 }
1837
1838 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1839                                 struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841         u32 i, total_size;
1842
1843         total_size = hdev->pkt_buf_size;
1844
1845         /* alloc tx buffer for all enabled tc */
1846         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1847                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1848
1849                 if (hdev->hw_tc_map & BIT(i)) {
1850                         if (total_size < hdev->tx_buf_size)
1851                                 return -ENOMEM;
1852
1853                         priv->tx_buf_size = hdev->tx_buf_size;
1854                 } else {
1855                         priv->tx_buf_size = 0;
1856                 }
1857
1858                 total_size -= priv->tx_buf_size;
1859         }
1860
1861         return 0;
1862 }
1863
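/* With the same illustrative figures (mps = 1500 so aligned_mps = 1536,
 * dv_buf_size = 8192), a PFC-enabled TC in the "max" case below gets
 * wl.low = 1536, wl.high = roundup(1536 + 1536, 256) = 3072 and
 * buf_size = 3072 + 8192 = 11264.
 */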
1864 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1865                                   struct hclge_pkt_buf_alloc *buf_alloc)
1866 {
1867         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1868         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1869         unsigned int i;
1870
1871         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1872                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1873
1874                 priv->enable = 0;
1875                 priv->wl.low = 0;
1876                 priv->wl.high = 0;
1877                 priv->buf_size = 0;
1878
1879                 if (!(hdev->hw_tc_map & BIT(i)))
1880                         continue;
1881
1882                 priv->enable = 1;
1883
1884                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1885                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1886                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1887                                                 HCLGE_BUF_SIZE_UNIT);
1888                 } else {
1889                         priv->wl.low = 0;
1890                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1891                                         aligned_mps;
1892                 }
1893
1894                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1895         }
1896
1897         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1898 }
1899
1900 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1901                                           struct hclge_pkt_buf_alloc *buf_alloc)
1902 {
1903         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1904         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1905         int i;
1906
1907         /* clear from the last TC backwards */
1908         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1909                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1910                 unsigned int mask = BIT((unsigned int)i);
1911
1912                 if (hdev->hw_tc_map & mask &&
1913                     !(hdev->tm_info.hw_pfc_map & mask)) {
1914                         /* clear this non-PFC TC's private buffer */
1915                         priv->wl.low = 0;
1916                         priv->wl.high = 0;
1917                         priv->buf_size = 0;
1918                         priv->enable = 0;
1919                         no_pfc_priv_num--;
1920                 }
1921
1922                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1923                     no_pfc_priv_num == 0)
1924                         break;
1925         }
1926
1927         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1928 }
1929
1930 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1931                                         struct hclge_pkt_buf_alloc *buf_alloc)
1932 {
1933         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1934         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1935         int i;
1936
1937         /* clear from the last TC backwards */
1938         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1939                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1940                 unsigned int mask = BIT((unsigned int)i);
1941
1942                 if (hdev->hw_tc_map & mask &&
1943                     hdev->tm_info.hw_pfc_map & mask) {
1944                         /* reduce the number of PFC TCs with a private buffer */
1945                         priv->wl.low = 0;
1946                         priv->enable = 0;
1947                         priv->wl.high = 0;
1948                         priv->buf_size = 0;
1949                         pfc_priv_num--;
1950                 }
1951
1952                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1953                     pfc_priv_num == 0)
1954                         break;
1955         }
1956
1957         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1958 }
1959
1960 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1961                                       struct hclge_pkt_buf_alloc *buf_alloc)
1962 {
1963 #define COMPENSATE_BUFFER       0x3C00
1964 #define COMPENSATE_HALF_MPS_NUM 5
1965 #define PRIV_WL_GAP             0x1800
1966
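        /* with the illustrative figures used above (mps = 1500,
         * dv_buf_size = 8192): half_mps = 750, so min_rx_priv =
         * 8192 + 0x3C00 + 5 * 750 = 27302, rounded up to 27392; every
         * enabled TC needs at least that much private buffer
         */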
1967         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1968         u32 tc_num = hclge_get_tc_num(hdev);
1969         u32 half_mps = hdev->mps >> 1;
1970         u32 min_rx_priv;
1971         unsigned int i;
1972
1973         if (tc_num)
1974                 rx_priv = rx_priv / tc_num;
1975
1976         if (tc_num <= NEED_RESERVE_TC_NUM)
1977                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1978
1979         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1980                         COMPENSATE_HALF_MPS_NUM * half_mps;
1981         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1982         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1983
1984         if (rx_priv < min_rx_priv)
1985                 return false;
1986
1987         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1988                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1989
1990                 priv->enable = 0;
1991                 priv->wl.low = 0;
1992                 priv->wl.high = 0;
1993                 priv->buf_size = 0;
1994
1995                 if (!(hdev->hw_tc_map & BIT(i)))
1996                         continue;
1997
1998                 priv->enable = 1;
1999                 priv->buf_size = rx_priv;
2000                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2001                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2002         }
2003
2004         buf_alloc->s_buf.buf_size = 0;
2005
2006         return true;
2007 }
2008
2009 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2010  * @hdev: pointer to struct hclge_dev
2011  * @buf_alloc: pointer to buffer calculation data
2012  * @return: 0: calculation successful, negative: fail
2013  */
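/* The strategies below are tried in order: private buffers only (no shared
 * buffer), maximum private watermarks, reduced private watermarks, dropping
 * the private buffers of non-PFC TCs, and finally dropping those of the PFC
 * TCs as well.
 */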
2014 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2015                                 struct hclge_pkt_buf_alloc *buf_alloc)
2016 {
2017         /* When DCB is not supported, rx private buffer is not allocated. */
2018         if (!hnae3_dev_dcb_supported(hdev)) {
2019                 u32 rx_all = hdev->pkt_buf_size;
2020
2021                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2022                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2023                         return -ENOMEM;
2024
2025                 return 0;
2026         }
2027
2028         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2029                 return 0;
2030
2031         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2032                 return 0;
2033
2034         /* try to decrease the buffer size */
2035         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2036                 return 0;
2037
2038         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2039                 return 0;
2040
2041         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2042                 return 0;
2043
2044         return -ENOMEM;
2045 }
2046
2047 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2048                                    struct hclge_pkt_buf_alloc *buf_alloc)
2049 {
2050         struct hclge_rx_priv_buff_cmd *req;
2051         struct hclge_desc desc;
2052         int ret;
2053         int i;
2054
2055         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2056         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2057
2058         /* allocate the private buffer for each TC */
2059         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2060                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2061
2062                 req->buf_num[i] =
2063                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2064                 req->buf_num[i] |=
2065                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2066         }
2067
2068         req->shared_buf =
2069                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2070                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2071
2072         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2073         if (ret)
2074                 dev_err(&hdev->pdev->dev,
2075                         "rx private buffer alloc cmd failed %d\n", ret);
2076
2077         return ret;
2078 }
2079
2080 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2081                                    struct hclge_pkt_buf_alloc *buf_alloc)
2082 {
2083         struct hclge_rx_priv_wl_buf *req;
2084         struct hclge_priv_buf *priv;
2085         struct hclge_desc desc[2];
2086         int i, j;
2087         int ret;
2088
2089         for (i = 0; i < 2; i++) {
2090                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2091                                            false);
2092                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2093
2094                 /* The first descriptor sets the NEXT bit to 1 */
2095                 if (i == 0)
2096                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2097                 else
2098                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2099
2100                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2101                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2102
2103                         priv = &buf_alloc->priv_buf[idx];
2104                         req->tc_wl[j].high =
2105                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2106                         req->tc_wl[j].high |=
2107                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2108                         req->tc_wl[j].low =
2109                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2110                         req->tc_wl[j].low |=
2111                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2112                 }
2113         }
2114
2115         /* send the 2 descriptors at one time */
2116         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2117         if (ret)
2118                 dev_err(&hdev->pdev->dev,
2119                         "rx private waterline config cmd failed %d\n",
2120                         ret);
2121         return ret;
2122 }
2123
2124 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2125                                     struct hclge_pkt_buf_alloc *buf_alloc)
2126 {
2127         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2128         struct hclge_rx_com_thrd *req;
2129         struct hclge_desc desc[2];
2130         struct hclge_tc_thrd *tc;
2131         int i, j;
2132         int ret;
2133
2134         for (i = 0; i < 2; i++) {
2135                 hclge_cmd_setup_basic_desc(&desc[i],
2136                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2137                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2138
2139                 /* The first descriptor sets the NEXT bit to 1 */
2140                 if (i == 0)
2141                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2142                 else
2143                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2144
2145                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2146                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2147
2148                         req->com_thrd[j].high =
2149                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2150                         req->com_thrd[j].high |=
2151                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2152                         req->com_thrd[j].low =
2153                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2154                         req->com_thrd[j].low |=
2155                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2156                 }
2157         }
2158
2159         /* Send 2 descriptors at one time */
2160         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2161         if (ret)
2162                 dev_err(&hdev->pdev->dev,
2163                         "common threshold config cmd failed %d\n", ret);
2164         return ret;
2165 }
2166
2167 static int hclge_common_wl_config(struct hclge_dev *hdev,
2168                                   struct hclge_pkt_buf_alloc *buf_alloc)
2169 {
2170         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2171         struct hclge_rx_com_wl *req;
2172         struct hclge_desc desc;
2173         int ret;
2174
2175         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2176
2177         req = (struct hclge_rx_com_wl *)desc.data;
2178         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2179         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2180
2181         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2182         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2183
2184         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2185         if (ret)
2186                 dev_err(&hdev->pdev->dev,
2187                         "common waterline config cmd failed %d\n", ret);
2188
2189         return ret;
2190 }
2191
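/* Top-level buffer setup: calculate and commit the TX buffers, then the RX
 * private buffers, then the waterlines and thresholds; the per-TC waterline
 * and common threshold commands are only issued on DCB-capable devices.
 */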
2192 int hclge_buffer_alloc(struct hclge_dev *hdev)
2193 {
2194         struct hclge_pkt_buf_alloc *pkt_buf;
2195         int ret;
2196
2197         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2198         if (!pkt_buf)
2199                 return -ENOMEM;
2200
2201         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2202         if (ret) {
2203                 dev_err(&hdev->pdev->dev,
2204                         "could not calc tx buffer size for all TCs %d\n", ret);
2205                 goto out;
2206         }
2207
2208         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2209         if (ret) {
2210                 dev_err(&hdev->pdev->dev,
2211                         "could not alloc tx buffers %d\n", ret);
2212                 goto out;
2213         }
2214
2215         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2216         if (ret) {
2217                 dev_err(&hdev->pdev->dev,
2218                         "could not calc rx priv buffer size for all TCs %d\n",
2219                         ret);
2220                 goto out;
2221         }
2222
2223         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2224         if (ret) {
2225                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2226                         ret);
2227                 goto out;
2228         }
2229
2230         if (hnae3_dev_dcb_supported(hdev)) {
2231                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2232                 if (ret) {
2233                         dev_err(&hdev->pdev->dev,
2234                                 "could not configure rx private waterline %d\n",
2235                                 ret);
2236                         goto out;
2237                 }
2238
2239                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2240                 if (ret) {
2241                         dev_err(&hdev->pdev->dev,
2242                                 "could not configure common threshold %d\n",
2243                                 ret);
2244                         goto out;
2245                 }
2246         }
2247
2248         ret = hclge_common_wl_config(hdev, pkt_buf);
2249         if (ret)
2250                 dev_err(&hdev->pdev->dev,
2251                         "could not configure common waterline %d\n", ret);
2252
2253 out:
2254         kfree(pkt_buf);
2255         return ret;
2256 }
2257
2258 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2259 {
2260         struct hnae3_handle *roce = &vport->roce;
2261         struct hnae3_handle *nic = &vport->nic;
2262
2263         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2264
2265         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2266             vport->back->num_msi_left == 0)
2267                 return -EINVAL;
2268
2269         roce->rinfo.base_vector = vport->back->roce_base_vector;
2270
2271         roce->rinfo.netdev = nic->kinfo.netdev;
2272         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2273
2274         roce->pdev = nic->pdev;
2275         roce->ae_algo = nic->ae_algo;
2276         roce->numa_node_mask = nic->numa_node_mask;
2277
2278         return 0;
2279 }
2280
2281 static int hclge_init_msi(struct hclge_dev *hdev)
2282 {
2283         struct pci_dev *pdev = hdev->pdev;
2284         int vectors;
2285         int i;
2286
2287         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2288                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2289         if (vectors < 0) {
2290                 dev_err(&pdev->dev,
2291                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2292                         vectors);
2293                 return vectors;
2294         }
2295         if (vectors < hdev->num_msi)
2296                 dev_warn(&hdev->pdev->dev,
2297                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2298                          hdev->num_msi, vectors);
2299
2300         hdev->num_msi = vectors;
2301         hdev->num_msi_left = vectors;
2302         hdev->base_msi_vector = pdev->irq;
2303         hdev->roce_base_vector = hdev->base_msi_vector +
2304                                 hdev->roce_base_msix_offset;
2305
2306         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2307                                            sizeof(u16), GFP_KERNEL);
2308         if (!hdev->vector_status) {
2309                 pci_free_irq_vectors(pdev);
2310                 return -ENOMEM;
2311         }
2312
2313         for (i = 0; i < hdev->num_msi; i++)
2314                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2315
2316         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2317                                         sizeof(int), GFP_KERNEL);
2318         if (!hdev->vector_irq) {
2319                 pci_free_irq_vectors(pdev);
2320                 return -ENOMEM;
2321         }
2322
2323         return 0;
2324 }
2325
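/* half duplex is only meaningful at 10M/100M; any other speed is forced to
 * full duplex
 */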
2326 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2327 {
2328         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2329                 duplex = HCLGE_MAC_FULL;
2330
2331         return duplex;
2332 }
2333
2334 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2335                                       u8 duplex)
2336 {
2337         struct hclge_config_mac_speed_dup_cmd *req;
2338         struct hclge_desc desc;
2339         int ret;
2340
2341         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2342
2343         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2344
2345         if (duplex)
2346                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2347
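        /* firmware speed-field encoding, taken from the cases below:
         * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G,
         * 6 = 10M, 7 = 100M
         */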
2348         switch (speed) {
2349         case HCLGE_MAC_SPEED_10M:
2350                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2351                                 HCLGE_CFG_SPEED_S, 6);
2352                 break;
2353         case HCLGE_MAC_SPEED_100M:
2354                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2355                                 HCLGE_CFG_SPEED_S, 7);
2356                 break;
2357         case HCLGE_MAC_SPEED_1G:
2358                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2359                                 HCLGE_CFG_SPEED_S, 0);
2360                 break;
2361         case HCLGE_MAC_SPEED_10G:
2362                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2363                                 HCLGE_CFG_SPEED_S, 1);
2364                 break;
2365         case HCLGE_MAC_SPEED_25G:
2366                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2367                                 HCLGE_CFG_SPEED_S, 2);
2368                 break;
2369         case HCLGE_MAC_SPEED_40G:
2370                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2371                                 HCLGE_CFG_SPEED_S, 3);
2372                 break;
2373         case HCLGE_MAC_SPEED_50G:
2374                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2375                                 HCLGE_CFG_SPEED_S, 4);
2376                 break;
2377         case HCLGE_MAC_SPEED_100G:
2378                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2379                                 HCLGE_CFG_SPEED_S, 5);
2380                 break;
2381         default:
2382                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2383                 return -EINVAL;
2384         }
2385
2386         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2387                       1);
2388
2389         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "mac speed/duplex config cmd failed %d.\n", ret);
2393                 return ret;
2394         }
2395
2396         return 0;
2397 }
2398
2399 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2400 {
2401         int ret;
2402
2403         duplex = hclge_check_speed_dup(duplex, speed);
2404         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2405                 return 0;
2406
2407         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2408         if (ret)
2409                 return ret;
2410
2411         hdev->hw.mac.speed = speed;
2412         hdev->hw.mac.duplex = duplex;
2413
2414         return 0;
2415 }
2416
2417 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2418                                      u8 duplex)
2419 {
2420         struct hclge_vport *vport = hclge_get_vport(handle);
2421         struct hclge_dev *hdev = vport->back;
2422
2423         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2424 }
2425
2426 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2427 {
2428         struct hclge_config_auto_neg_cmd *req;
2429         struct hclge_desc desc;
2430         u32 flag = 0;
2431         int ret;
2432
2433         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2434
2435         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2436         if (enable)
2437                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2438         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2439
2440         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2441         if (ret)
2442                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2443                         ret);
2444
2445         return ret;
2446 }
2447
2448 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2449 {
2450         struct hclge_vport *vport = hclge_get_vport(handle);
2451         struct hclge_dev *hdev = vport->back;
2452
2453         if (!hdev->hw.mac.support_autoneg) {
2454                 if (enable) {
2455                         dev_err(&hdev->pdev->dev,
2456                                 "autoneg is not supported by current port\n");
2457                         return -EOPNOTSUPP;
2458                 } else {
2459                         return 0;
2460                 }
2461         }
2462
2463         return hclge_set_autoneg_en(hdev, enable);
2464 }
2465
2466 static int hclge_get_autoneg(struct hnae3_handle *handle)
2467 {
2468         struct hclge_vport *vport = hclge_get_vport(handle);
2469         struct hclge_dev *hdev = vport->back;
2470         struct phy_device *phydev = hdev->hw.mac.phydev;
2471
2472         if (phydev)
2473                 return phydev->autoneg;
2474
2475         return hdev->hw.mac.autoneg;
2476 }
2477
2478 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2479 {
2480         struct hclge_vport *vport = hclge_get_vport(handle);
2481         struct hclge_dev *hdev = vport->back;
2482         int ret;
2483
2484         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2485
2486         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2487         if (ret)
2488                 return ret;
2489         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2490 }
2491
2492 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2493 {
2494         struct hclge_vport *vport = hclge_get_vport(handle);
2495         struct hclge_dev *hdev = vport->back;
2496
2497         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2498                 return hclge_set_autoneg_en(hdev, !halt);
2499
2500         return 0;
2501 }
2502
2503 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2504 {
2505         struct hclge_config_fec_cmd *req;
2506         struct hclge_desc desc;
2507         int ret;
2508
2509         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2510
2511         req = (struct hclge_config_fec_cmd *)desc.data;
2512         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2513                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2514         if (fec_mode & BIT(HNAE3_FEC_RS))
2515                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2516                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2517         if (fec_mode & BIT(HNAE3_FEC_BASER))
2518                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2519                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2520
2521         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2522         if (ret)
2523                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2524
2525         return ret;
2526 }
2527
2528 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2529 {
2530         struct hclge_vport *vport = hclge_get_vport(handle);
2531         struct hclge_dev *hdev = vport->back;
2532         struct hclge_mac *mac = &hdev->hw.mac;
2533         int ret;
2534
2535         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2536                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2537                 return -EINVAL;
2538         }
2539
2540         ret = hclge_set_fec_hw(hdev, fec_mode);
2541         if (ret)
2542                 return ret;
2543
2544         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2545         return 0;
2546 }
2547
2548 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2549                           u8 *fec_mode)
2550 {
2551         struct hclge_vport *vport = hclge_get_vport(handle);
2552         struct hclge_dev *hdev = vport->back;
2553         struct hclge_mac *mac = &hdev->hw.mac;
2554
2555         if (fec_ability)
2556                 *fec_ability = mac->fec_ability;
2557         if (fec_mode)
2558                 *fec_mode = mac->fec_mode;
2559 }
2560
2561 static int hclge_mac_init(struct hclge_dev *hdev)
2562 {
2563         struct hclge_mac *mac = &hdev->hw.mac;
2564         int ret;
2565
2566         hdev->support_sfp_query = true;
2567         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2568         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2569                                          hdev->hw.mac.duplex);
2570         if (ret) {
2571                 dev_err(&hdev->pdev->dev,
2572                         "Config mac speed dup fail ret=%d\n", ret);
2573                 return ret;
2574         }
2575
2576         if (hdev->hw.mac.support_autoneg) {
2577                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2578                 if (ret) {
2579                         dev_err(&hdev->pdev->dev,
2580                                 "Config mac autoneg fail ret=%d\n", ret);
2581                         return ret;
2582                 }
2583         }
2584
2585         mac->link = 0;
2586
2587         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2588                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2589                 if (ret) {
2590                         dev_err(&hdev->pdev->dev,
2591                                 "Fec mode init fail, ret = %d\n", ret);
2592                         return ret;
2593                 }
2594         }
2595
2596         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2597         if (ret) {
2598                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2599                 return ret;
2600         }
2601
2602         ret = hclge_buffer_alloc(hdev);
2603         if (ret)
2604                 dev_err(&hdev->pdev->dev,
2605                         "allocate buffer fail, ret=%d\n", ret);
2606
2607         return ret;
2608 }
2609
2610 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2611 {
2612         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2613             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2614                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2615                               &hdev->mbx_service_task);
2616 }
2617
2618 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2619 {
2620         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2621             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2622                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2623                               &hdev->rst_service_task);
2624 }
2625
2626 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2627 {
2628         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2629             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2630             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2631                 hdev->hw_stats.stats_timer++;
2632                 hdev->fd_arfs_expire_timer++;
2633                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2634                                     system_wq, &hdev->service_task,
2635                                     delay_time);
2636         }
2637 }
2638
2639 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2640 {
2641         struct hclge_link_status_cmd *req;
2642         struct hclge_desc desc;
2643         int link_status;
2644         int ret;
2645
2646         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2647         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2648         if (ret) {
2649                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2650                         ret);
2651                 return ret;
2652         }
2653
2654         req = (struct hclge_link_status_cmd *)desc.data;
2655         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2656
2657         return !!link_status;
2658 }
2659
2660 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2661 {
2662         unsigned int mac_state;
2663         int link_stat;
2664
2665         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2666                 return 0;
2667
2668         mac_state = hclge_get_mac_link_status(hdev);
2669
2670         if (hdev->hw.mac.phydev) {
2671                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2672                         link_stat = mac_state &
2673                                 hdev->hw.mac.phydev->link;
2674                 else
2675                         link_stat = 0;
2676
2677         } else {
2678                 link_stat = mac_state;
2679         }
2680
2681         return !!link_stat;
2682 }
2683
2684 static void hclge_update_link_status(struct hclge_dev *hdev)
2685 {
2686         struct hnae3_client *rclient = hdev->roce_client;
2687         struct hnae3_client *client = hdev->nic_client;
2688         struct hnae3_handle *rhandle;
2689         struct hnae3_handle *handle;
2690         int state;
2691         int i;
2692
2693         if (!client)
2694                 return;
2695         state = hclge_get_mac_phy_link(hdev);
2696         if (state != hdev->hw.mac.link) {
2697                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2698                         handle = &hdev->vport[i].nic;
2699                         client->ops->link_status_change(handle, state);
2700                         hclge_config_mac_tnl_int(hdev, state);
2701                         rhandle = &hdev->vport[i].roce;
2702                         if (rclient && rclient->ops->link_status_change)
2703                                 rclient->ops->link_status_change(rhandle,
2704                                                                  state);
2705                 }
2706                 hdev->hw.mac.link = state;
2707         }
2708 }
2709
2710 static void hclge_update_port_capability(struct hclge_mac *mac)
2711 {
2712         /* update fec ability by speed */
2713         hclge_convert_setting_fec(mac);
2714
2715         /* the firmware cannot identify the backplane type; the media type
2716          * read from the configuration helps to deal with it
2717          */
2718         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2719             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2720                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2721         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2722                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2723
2724         if (mac->support_autoneg) {
2725                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2726                 linkmode_copy(mac->advertising, mac->supported);
2727         } else {
2728                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2729                                    mac->supported);
2730                 linkmode_zero(mac->advertising);
2731         }
2732 }
2733
2734 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2735 {
2736         struct hclge_sfp_info_cmd *resp;
2737         struct hclge_desc desc;
2738         int ret;
2739
2740         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2741         resp = (struct hclge_sfp_info_cmd *)desc.data;
2742         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2743         if (ret == -EOPNOTSUPP) {
2744                 dev_warn(&hdev->pdev->dev,
2745                          "IMP do not support get SFP speed %d\n", ret);
2746                 return ret;
2747         } else if (ret) {
2748                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2749                 return ret;
2750         }
2751
2752         *speed = le32_to_cpu(resp->speed);
2753
2754         return 0;
2755 }
2756
2757 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2758 {
2759         struct hclge_sfp_info_cmd *resp;
2760         struct hclge_desc desc;
2761         int ret;
2762
2763         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2764         resp = (struct hclge_sfp_info_cmd *)desc.data;
2765
2766         resp->query_type = QUERY_ACTIVE_SPEED;
2767
2768         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2769         if (ret == -EOPNOTSUPP) {
2770                 dev_warn(&hdev->pdev->dev,
2771                          "IMP does not support get SFP info %d\n", ret);
2772                 return ret;
2773         } else if (ret) {
2774                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2775                 return ret;
2776         }
2777
2778         mac->speed = le32_to_cpu(resp->speed);
2779         /* if resp->speed_ability is 0, it means the firmware is an old
2780          * version, so do not update these params
2781          */
2782         if (resp->speed_ability) {
2783                 mac->module_type = le32_to_cpu(resp->module_type);
2784                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2785                 mac->autoneg = resp->autoneg;
2786                 mac->support_autoneg = resp->autoneg_ability;
2787                 mac->speed_type = QUERY_ACTIVE_SPEED;
2788                 if (!resp->active_fec)
2789                         mac->fec_mode = 0;
2790                 else
2791                         mac->fec_mode = BIT(resp->active_fec);
2792         } else {
2793                 mac->speed_type = QUERY_SFP_SPEED;
2794         }
2795
2796         return 0;
2797 }
2798
2799 static int hclge_update_port_info(struct hclge_dev *hdev)
2800 {
2801         struct hclge_mac *mac = &hdev->hw.mac;
2802         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2803         int ret;
2804
2805         /* get the port info from SFP cmd if not copper port */
2806         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2807                 return 0;
2808
2809         /* if IMP does not support getting SFP/qSFP info, return directly */
2810         if (!hdev->support_sfp_query)
2811                 return 0;
2812
2813         if (hdev->pdev->revision >= 0x21)
2814                 ret = hclge_get_sfp_info(hdev, mac);
2815         else
2816                 ret = hclge_get_sfp_speed(hdev, &speed);
2817
2818         if (ret == -EOPNOTSUPP) {
2819                 hdev->support_sfp_query = false;
2820                 return ret;
2821         } else if (ret) {
2822                 return ret;
2823         }
2824
2825         if (hdev->pdev->revision >= 0x21) {
2826                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2827                         hclge_update_port_capability(mac);
2828                         return 0;
2829                 }
2830                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2831                                                HCLGE_MAC_FULL);
2832         } else {
2833                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2834                         return 0; /* do nothing if no SFP */
2835
2836                 /* must config full duplex for SFP */
2837                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2838         }
2839 }
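/* The flow above reduces to a small decision table:
 *   - copper port, or no SFP query support: nothing to do;
 *   - revision >= 0x21: query full SFP info, then either update the port
 *     capability (active-speed query) or force the speed with full duplex;
 *   - older revisions: query the SFP speed only and, if one is reported,
 *     configure it with full duplex (SFP requires full duplex).
 */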
2840
2841 static int hclge_get_status(struct hnae3_handle *handle)
2842 {
2843         struct hclge_vport *vport = hclge_get_vport(handle);
2844         struct hclge_dev *hdev = vport->back;
2845
2846         hclge_update_link_status(hdev);
2847
2848         return hdev->hw.mac.link;
2849 }
2850
2851 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2852 {
2853         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2854
2855         /* fetch the events from their corresponding regs */
2856         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2857         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2858         msix_src_reg = hclge_read_dev(&hdev->hw,
2859                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2860
2861         /* Assumption: if reset and mailbox events are reported together,
2862          * only the reset event is processed in this pass and the mailbox
2863          * events are deferred. Since the RX CMDQ event is not cleared this
2864          * time, the hardware will raise another interrupt just for the
2865          * mailbox.
2866          *
2867          * check for vector0 reset event sources
2868          */
2869         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2870                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2871                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2872                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2873                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2874                 hdev->rst_stats.imp_rst_cnt++;
2875                 return HCLGE_VECTOR0_EVENT_RST;
2876         }
2877
2878         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2879                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2880                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2881                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2882                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2883                 hdev->rst_stats.global_rst_cnt++;
2884                 return HCLGE_VECTOR0_EVENT_RST;
2885         }
2886
2887         /* check for vector0 msix event source */
2888         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2889                 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2890                          msix_src_reg);
2891                 *clearval = msix_src_reg;
2892                 return HCLGE_VECTOR0_EVENT_ERR;
2893         }
2894
2895         /* check for vector0 mailbox(=CMDQ RX) event source */
2896         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2897                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2898                 *clearval = cmdq_src_reg;
2899                 return HCLGE_VECTOR0_EVENT_MBX;
2900         }
2901
2902         /* print other vector0 event source */
2903         dev_info(&hdev->pdev->dev,
2904                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
2905                  cmdq_src_reg, msix_src_reg);
2906         *clearval = msix_src_reg;
2907
2908         return HCLGE_VECTOR0_EVENT_OTHER;
2909 }
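/* The checks above impose a fixed priority when several events fire at
 * once: IMP reset, then global reset, then MSI-X error, then mailbox,
 * then "other". Only the highest-priority cause is returned; an
 * uncleared lower-priority source (e.g. a pending mailbox) re-raises
 * the interrupt, as described in the comment inside the function.
 */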
2910
2911 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2912                                     u32 regclr)
2913 {
2914         switch (event_type) {
2915         case HCLGE_VECTOR0_EVENT_RST:
2916                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2917                 break;
2918         case HCLGE_VECTOR0_EVENT_MBX:
2919                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2920                 break;
2921         default:
2922                 break;
2923         }
2924 }
2925
2926 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2927 {
2928         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2929                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2930                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2931                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2932         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2933 }
2934
2935 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2936 {
2937         writel(enable ? 1 : 0, vector->addr);
2938 }
2939
2940 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2941 {
2942         struct hclge_dev *hdev = data;
2943         u32 clearval = 0;
2944         u32 event_cause;
2945
2946         hclge_enable_vector(&hdev->misc_vector, false);
2947         event_cause = hclge_check_event_cause(hdev, &clearval);
2948
2949         /* vector 0 interrupt is shared with reset and mailbox source events. */
2950         switch (event_cause) {
2951         case HCLGE_VECTOR0_EVENT_ERR:
2952                 /* we do not know what type of reset is required now. This can
2953                  * only be decided after we fetch the type of errors which
2954                  * caused this event. Therefore, we do the following for now:
2955                  * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means
2956                  *    the actual reset type is deferred.
2957                  * 2. Schedule the reset service task.
2958                  * 3. When the service task receives the HNAE3_UNKNOWN_RESET
2959                  *    type, it fetches the correct type of reset by first
2960                  *    decoding the types of errors.
2961                  */
2962                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2963                 /* fall through */
2964         case HCLGE_VECTOR0_EVENT_RST:
2965                 hclge_reset_task_schedule(hdev);
2966                 break;
2967         case HCLGE_VECTOR0_EVENT_MBX:
2968                 /* If we are here, then either
2969                  * 1. we are not handling any mbx task and none is
2970                  *    scheduled as well,
2971                  *                        or
2972                  * 2. we are handling an mbx task but nothing more is
2973                  *    scheduled.
2974                  * In both cases, we should schedule the mbx task, as there
2975                  * are more mbx messages reported by this interrupt.
2976                  */
2977                 hclge_mbx_task_schedule(hdev);
2978                 break;
2979         default:
2980                 dev_warn(&hdev->pdev->dev,
2981                          "received unknown or unhandled event of vector0\n");
2982                 break;
2983         }
2984
2985         hclge_clear_event_cause(hdev, event_cause, clearval);
2986
2987         /* Enable the interrupt if it is not caused by reset. When clearval
2988          * equals 0, the interrupt status may have been cleared by hardware
2989          * before the driver read the status register. In this case, the
2990          * vector0 interrupt should also be enabled.
2991          */
2992         if (!clearval ||
2993             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2994                 hclge_enable_vector(&hdev->misc_vector, true);
2995         }
2996
2997         return IRQ_HANDLED;
2998 }
2999
3000 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3001 {
3002         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3003                 dev_warn(&hdev->pdev->dev,
3004                          "vector(vector_id %d) has been freed.\n", vector_id);
3005                 return;
3006         }
3007
3008         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3009         hdev->num_msi_left += 1;
3010         hdev->num_msi_used -= 1;
3011 }
3012
3013 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3014 {
3015         struct hclge_misc_vector *vector = &hdev->misc_vector;
3016
3017         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3018
3019         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3020         hdev->vector_status[0] = 0;
3021
3022         hdev->num_msi_left -= 1;
3023         hdev->num_msi_used += 1;
3024 }
3025
3026 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3027                                       const cpumask_t *mask)
3028 {
3029         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3030                                               affinity_notify);
3031
3032         cpumask_copy(&hdev->affinity_mask, mask);
3033 }
3034
3035 static void hclge_irq_affinity_release(struct kref *ref)
3036 {
3037 }
3038
3039 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3040 {
3041         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3042                               &hdev->affinity_mask);
3043
3044         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3045         hdev->affinity_notify.release = hclge_irq_affinity_release;
3046         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3047                                   &hdev->affinity_notify);
3048 }
3049
3050 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3051 {
3052         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3053         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3054 }
3055
3056 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3057 {
3058         int ret;
3059
3060         hclge_get_misc_vector(hdev);
3061
3062         /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3063         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3064                           0, "hclge_misc", hdev);
3065         if (ret) {
3066                 hclge_free_vector(hdev, 0);
3067                 dev_err(&hdev->pdev->dev, "request misc irq(%d) failed\n",
3068                         hdev->misc_vector.vector_irq);
3069         }
3070
3071         return ret;
3072 }
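/* Illustrative pairing, not from this file: probe/remove paths are
 * expected to balance these calls, e.g.
 *
 *	ret = hclge_misc_irq_init(hdev);
 *	if (ret)
 *		return ret;
 *	...
 *	hclge_misc_irq_uninit(hdev);
 */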
3073
3074 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3075 {
3076         free_irq(hdev->misc_vector.vector_irq, hdev);
3077         hclge_free_vector(hdev, 0);
3078 }
3079
3080 int hclge_notify_client(struct hclge_dev *hdev,
3081                         enum hnae3_reset_notify_type type)
3082 {
3083         struct hnae3_client *client = hdev->nic_client;
3084         u16 i;
3085
3086         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3087                 return 0;
3088
3089         if (!client->ops->reset_notify)
3090                 return -EOPNOTSUPP;
3091
3092         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3093                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3094                 int ret;
3095
3096                 ret = client->ops->reset_notify(handle, type);
3097                 if (ret) {
3098                         dev_err(&hdev->pdev->dev,
3099                                 "notify nic client failed %d(%d)\n", type, ret);
3100                         return ret;
3101                 }
3102         }
3103
3104         return 0;
3105 }
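/* For reference, hclge_reset() below drives the nic client through these
 * notifications in order:
 *
 *	HNAE3_DOWN_CLIENT -> (hw reset) -> HNAE3_UNINIT_CLIENT ->
 *	HNAE3_INIT_CLIENT -> HNAE3_RESTORE_CLIENT -> HNAE3_UP_CLIENT
 */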
3106
3107 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3108                                     enum hnae3_reset_notify_type type)
3109 {
3110         struct hnae3_client *client = hdev->roce_client;
3111         int ret = 0;
3112         u16 i;
3113
3114         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3115                 return 0;
3116
3117         if (!client->ops->reset_notify)
3118                 return -EOPNOTSUPP;
3119
3120         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3121                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3122
3123                 ret = client->ops->reset_notify(handle, type);
3124                 if (ret) {
3125                         dev_err(&hdev->pdev->dev,
3126                                 "notify roce client failed %d(%d)\n",
3127                                 type, ret);
3128                         return ret;
3129                 }
3130         }
3131
3132         return ret;
3133 }
3134
3135 static int hclge_reset_wait(struct hclge_dev *hdev)
3136 {
3137 #define HCLGE_RESET_WAIT_MS     100
3138 #define HCLGE_RESET_WAIT_CNT    200
3139         u32 val, reg, reg_bit;
3140         u32 cnt = 0;
3141
3142         switch (hdev->reset_type) {
3143         case HNAE3_IMP_RESET:
3144                 reg = HCLGE_GLOBAL_RESET_REG;
3145                 reg_bit = HCLGE_IMP_RESET_BIT;
3146                 break;
3147         case HNAE3_GLOBAL_RESET:
3148                 reg = HCLGE_GLOBAL_RESET_REG;
3149                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3150                 break;
3151         case HNAE3_FUNC_RESET:
3152                 reg = HCLGE_FUN_RST_ING;
3153                 reg_bit = HCLGE_FUN_RST_ING_B;
3154                 break;
3155         case HNAE3_FLR_RESET:
3156                 break;
3157         default:
3158                 dev_err(&hdev->pdev->dev,
3159                         "Wait for unsupported reset type: %d\n",
3160                         hdev->reset_type);
3161                 return -EINVAL;
3162         }
3163
3164         if (hdev->reset_type == HNAE3_FLR_RESET) {
3165                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3166                        cnt++ < HCLGE_RESET_WAIT_CNT)
3167                         msleep(HCLGE_RESET_WAIT_MS);
3168
3169                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3170                         dev_err(&hdev->pdev->dev,
3171                                 "flr wait timeout: %d\n", cnt);
3172                         return -EBUSY;
3173                 }
3174
3175                 return 0;
3176         }
3177
3178         val = hclge_read_dev(&hdev->hw, reg);
3179         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3180                 msleep(HCLGE_RESET_WAIT_MS);
3181                 val = hclge_read_dev(&hdev->hw, reg);
3182                 cnt++;
3183         }
3184
3185         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3186                 dev_warn(&hdev->pdev->dev,
3187                          "Wait for reset timeout: %d\n", hdev->reset_type);
3188                 return -EBUSY;
3189         }
3190
3191         return 0;
3192 }
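/* Worst case for the polling loop above: HCLGE_RESET_WAIT_CNT polls of
 * HCLGE_RESET_WAIT_MS each, i.e. 200 * 100 ms = 20 seconds, before the
 * -EBUSY path is taken.
 */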
3193
3194 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3195 {
3196         struct hclge_vf_rst_cmd *req;
3197         struct hclge_desc desc;
3198
3199         req = (struct hclge_vf_rst_cmd *)desc.data;
3200         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3201         req->dest_vfid = func_id;
3202
3203         if (reset)
3204                 req->vf_rst = 0x1;
3205
3206         return hclge_cmd_send(&hdev->hw, &desc, 1);
3207 }
3208
3209 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3210 {
3211         int i;
3212
3213         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3214                 struct hclge_vport *vport = &hdev->vport[i];
3215                 int ret;
3216
3217                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3218                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3219                 if (ret) {
3220                         dev_err(&hdev->pdev->dev,
3221                                 "set vf(%d) rst failed %d!\n",
3222                                 vport->vport_id, ret);
3223                         return ret;
3224                 }
3225
3226                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3227                         continue;
3228
3229                 /* Inform VF to process the reset.
3230                  * hclge_inform_reset_assert_to_vf may fail if VF
3231                  * driver is not loaded.
3232                  */
3233                 ret = hclge_inform_reset_assert_to_vf(vport);
3234                 if (ret)
3235                         dev_warn(&hdev->pdev->dev,
3236                                  "inform reset to vf(%d) failed %d!\n",
3237                                  vport->vport_id, ret);
3238         }
3239
3240         return 0;
3241 }
3242
3243 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3244 {
3245         struct hclge_pf_rst_sync_cmd *req;
3246         struct hclge_desc desc;
3247         int cnt = 0;
3248         int ret;
3249
3250         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3251         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3252
3253         do {
3254                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3255                 /* for compatibility with old firmware, wait
3256                  * 100 ms for the VF to stop IO
3257                  */
3258                 if (ret == -EOPNOTSUPP) {
3259                         msleep(HCLGE_RESET_SYNC_TIME);
3260                         return 0;
3261                 } else if (ret) {
3262                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3263                                 ret);
3264                         return ret;
3265                 } else if (req->all_vf_ready) {
3266                         return 0;
3267                 }
3268                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3269                 hclge_cmd_reuse_desc(&desc, true);
3270         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3271
3272         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3273         return -ETIME;
3274 }
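/* Worst-case bound for the sync loop above: HCLGE_PF_RESET_SYNC_CNT polls
 * of HCLGE_PF_RESET_SYNC_TIME ms each; with the values defined at the top
 * of this file (1500 polls of 20 ms) that is roughly 30 seconds before
 * -ETIME is returned.
 */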
3275
3276 void hclge_report_hw_error(struct hclge_dev *hdev,
3277                            enum hnae3_hw_error_type type)
3278 {
3279         struct hnae3_client *client = hdev->nic_client;
3280         u16 i;
3281
3282         if (!client || !client->ops->process_hw_error ||
3283             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3284                 return;
3285
3286         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3287                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3288 }
3289
3290 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3291 {
3292         u32 reg_val;
3293
3294         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3295         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3296                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3297                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3298                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3299         }
3300
3301         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3302                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3303                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3304                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3305         }
3306 }
3307
3308 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3309 {
3310         struct hclge_desc desc;
3311         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3312         int ret;
3313
3314         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3315         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3316         req->fun_reset_vfid = func_id;
3317
3318         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3319         if (ret)
3320                 dev_err(&hdev->pdev->dev,
3321                         "send function reset cmd fail, status =%d\n", ret);
3322
3323         return ret;
3324 }
3325
3326 static void hclge_do_reset(struct hclge_dev *hdev)
3327 {
3328         struct hnae3_handle *handle = &hdev->vport[0].nic;
3329         struct pci_dev *pdev = hdev->pdev;
3330         u32 val;
3331
3332         if (hclge_get_hw_reset_stat(handle)) {
3333                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3334                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3335                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3336                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3337                 return;
3338         }
3339
3340         switch (hdev->reset_type) {
3341         case HNAE3_GLOBAL_RESET:
3342                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3343                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3344                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3345                 dev_info(&pdev->dev, "Global Reset requested\n");
3346                 break;
3347         case HNAE3_FUNC_RESET:
3348                 dev_info(&pdev->dev, "PF Reset requested\n");
3349                 /* schedule again to check later */
3350                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3351                 hclge_reset_task_schedule(hdev);
3352                 break;
3353         case HNAE3_FLR_RESET:
3354                 dev_info(&pdev->dev, "FLR requested\n");
3355                 /* schedule again to check later */
3356                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3357                 hclge_reset_task_schedule(hdev);
3358                 break;
3359         default:
3360                 dev_warn(&pdev->dev,
3361                          "Unsupported reset type: %d\n", hdev->reset_type);
3362                 break;
3363         }
3364 }
3365
3366 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3367                                                    unsigned long *addr)
3368 {
3369         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3370         struct hclge_dev *hdev = ae_dev->priv;
3371
3372         /* first, resolve any unknown reset type to the known type(s) */
3373         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3374                 /* we will intentionally ignore any errors from this function
3375                  * as we will end up in *some* reset request in any case
3376                  */
3377                 hclge_handle_hw_msix_error(hdev, addr);
3378                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3379                 /* We deferred the clearing of the error event which caused
3380                  * the interrupt, since it was not possible to do that in
3381                  * interrupt context (and this is the reason we introduced
3382                  * the new UNKNOWN reset type). Now that the errors have been
3383                  * handled and cleared in hardware, we can safely enable
3384                  * interrupts. This is an exception to the norm.
3385                  */
3386                 hclge_enable_vector(&hdev->misc_vector, true);
3387         }
3388
3389         /* return the highest priority reset level amongst all */
3390         if (test_bit(HNAE3_IMP_RESET, addr)) {
3391                 rst_level = HNAE3_IMP_RESET;
3392                 clear_bit(HNAE3_IMP_RESET, addr);
3393                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3394                 clear_bit(HNAE3_FUNC_RESET, addr);
3395         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3396                 rst_level = HNAE3_GLOBAL_RESET;
3397                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3398                 clear_bit(HNAE3_FUNC_RESET, addr);
3399         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3400                 rst_level = HNAE3_FUNC_RESET;
3401                 clear_bit(HNAE3_FUNC_RESET, addr);
3402         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3403                 rst_level = HNAE3_FLR_RESET;
3404                 clear_bit(HNAE3_FLR_RESET, addr);
3405         }
3406
3407         if (hdev->reset_type != HNAE3_NONE_RESET &&
3408             rst_level < hdev->reset_type)
3409                 return HNAE3_NONE_RESET;
3410
3411         return rst_level;
3412 }
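/* Example of the resolution above: if both HNAE3_GLOBAL_RESET and
 * HNAE3_FUNC_RESET are pending in @addr, HNAE3_GLOBAL_RESET is returned
 * and both bits are cleared, so the lower-level request is absorbed by
 * the higher-level reset.
 */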
3413
3414 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3415 {
3416         u32 clearval = 0;
3417
3418         switch (hdev->reset_type) {
3419         case HNAE3_IMP_RESET:
3420                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3421                 break;
3422         case HNAE3_GLOBAL_RESET:
3423                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3424                 break;
3425         default:
3426                 break;
3427         }
3428
3429         if (!clearval)
3430                 return;
3431
3432         /* For revision 0x20, the reset interrupt source
3433          * can only be cleared after the hardware reset is done
3434          */
3435         if (hdev->pdev->revision == 0x20)
3436                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3437                                 clearval);
3438
3439         hclge_enable_vector(&hdev->misc_vector, true);
3440 }
3441
3442 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3443 {
3444         int ret = 0;
3445
3446         switch (hdev->reset_type) {
3447         case HNAE3_FUNC_RESET:
3448                 /* fall through */
3449         case HNAE3_FLR_RESET:
3450                 ret = hclge_set_all_vf_rst(hdev, true);
3451                 break;
3452         default:
3453                 break;
3454         }
3455
3456         return ret;
3457 }
3458
3459 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3460 {
3461         u32 reg_val;
3462
3463         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3464         if (enable)
3465                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3466         else
3467                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3468
3469         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3470 }
3471
3472 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3473 {
3474         u32 reg_val;
3475         int ret = 0;
3476
3477         switch (hdev->reset_type) {
3478         case HNAE3_FUNC_RESET:
3479                 /* confirm whether all running VFs are ready
3480                  * before requesting the PF reset
3481                  */
3482                 ret = hclge_func_reset_sync_vf(hdev);
3483                 if (ret)
3484                         return ret;
3485
3486                 ret = hclge_func_reset_cmd(hdev, 0);
3487                 if (ret) {
3488                         dev_err(&hdev->pdev->dev,
3489                                 "asserting function reset fail %d!\n", ret);
3490                         return ret;
3491                 }
3492
3493                 /* After performing a PF reset, it is not necessary to do the
3494                  * mailbox handling or send any command to firmware, because
3495                  * any mailbox handling or command to firmware is only valid
3496                  * after hclge_cmd_init is called.
3497                  */
3498                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3499                 hdev->rst_stats.pf_rst_cnt++;
3500                 break;
3501         case HNAE3_FLR_RESET:
3502                 /* confirm whether all running VFs are ready
3503                  * before requesting the PF reset
3504                  */
3505                 ret = hclge_func_reset_sync_vf(hdev);
3506                 if (ret)
3507                         return ret;
3508
3509                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3510                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3511                 hdev->rst_stats.flr_rst_cnt++;
3512                 break;
3513         case HNAE3_IMP_RESET:
3514                 hclge_handle_imp_error(hdev);
3515                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3516                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3517                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3518                 break;
3519         default:
3520                 break;
3521         }
3522
3523         /* inform hardware that preparatory work is done */
3524         msleep(HCLGE_RESET_SYNC_TIME);
3525         hclge_reset_handshake(hdev, true);
3526         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3527
3528         return ret;
3529 }
3530
3531 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3532 {
3533 #define MAX_RESET_FAIL_CNT 5
3534
3535         if (hdev->reset_pending) {
3536                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3537                          hdev->reset_pending);
3538                 return true;
3539         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3540                    HCLGE_RESET_INT_M) {
3541                 dev_info(&hdev->pdev->dev,
3542                          "reset failed because of a new reset interrupt\n");
3543                 hclge_clear_reset_cause(hdev);
3544                 return false;
3545         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3546                 hdev->reset_fail_cnt++;
3547                 set_bit(hdev->reset_type, &hdev->reset_pending);
3548                 dev_info(&hdev->pdev->dev,
3549                          "re-schedule reset task(%d)\n",
3550                          hdev->reset_fail_cnt);
3551                 return true;
3552         }
3553
3554         hclge_clear_reset_cause(hdev);
3555
3556         /* recover the handshake status when reset fails */
3557         hclge_reset_handshake(hdev, true);
3558
3559         dev_err(&hdev->pdev->dev, "Reset failed!\n");
3560         return false;
3561 }
3562
3563 static int hclge_set_rst_done(struct hclge_dev *hdev)
3564 {
3565         struct hclge_pf_rst_done_cmd *req;
3566         struct hclge_desc desc;
3567
3568         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3569         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3570         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3571
3572         return hclge_cmd_send(&hdev->hw, &desc, 1);
3573 }
3574
3575 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3576 {
3577         int ret = 0;
3578
3579         switch (hdev->reset_type) {
3580         case HNAE3_FUNC_RESET:
3581                 /* fall through */
3582         case HNAE3_FLR_RESET:
3583                 ret = hclge_set_all_vf_rst(hdev, false);
3584                 break;
3585         case HNAE3_GLOBAL_RESET:
3586                 /* fall through */
3587         case HNAE3_IMP_RESET:
3588                 ret = hclge_set_rst_done(hdev);
3589                 break;
3590         default:
3591                 break;
3592         }
3593
3594         /* clear the handshake status after re-initialization is done */
3595         hclge_reset_handshake(hdev, false);
3596
3597         return ret;
3598 }
3599
3600 static int hclge_reset_stack(struct hclge_dev *hdev)
3601 {
3602         int ret;
3603
3604         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3605         if (ret)
3606                 return ret;
3607
3608         ret = hclge_reset_ae_dev(hdev->ae_dev);
3609         if (ret)
3610                 return ret;
3611
3612         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3613         if (ret)
3614                 return ret;
3615
3616         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3617 }
3618
3619 static void hclge_reset(struct hclge_dev *hdev)
3620 {
3621         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3622         int ret;
3623
3624         /* Initialize the ae_dev reset status as well, in case the enet
3625          * layer wants to know if the device is undergoing a reset
3626          */
3627         ae_dev->reset_type = hdev->reset_type;
3628         hdev->rst_stats.reset_cnt++;
3629         /* perform reset of the stack & ae device for a client */
3630         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3631         if (ret)
3632                 goto err_reset;
3633
3634         ret = hclge_reset_prepare_down(hdev);
3635         if (ret)
3636                 goto err_reset;
3637
3638         rtnl_lock();
3639         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3640         if (ret)
3641                 goto err_reset_lock;
3642
3643         rtnl_unlock();
3644
3645         ret = hclge_reset_prepare_wait(hdev);
3646         if (ret)
3647                 goto err_reset;
3648
3649         if (hclge_reset_wait(hdev))
3650                 goto err_reset;
3651
3652         hdev->rst_stats.hw_reset_done_cnt++;
3653
3654         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3655         if (ret)
3656                 goto err_reset;
3657
3658         rtnl_lock();
3659
3660         ret = hclge_reset_stack(hdev);
3661         if (ret)
3662                 goto err_reset_lock;
3663
3664         hclge_clear_reset_cause(hdev);
3665
3666         ret = hclge_reset_prepare_up(hdev);
3667         if (ret)
3668                 goto err_reset_lock;
3669
3670         rtnl_unlock();
3671
3672         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3673         /* ignore the RoCE notify error only once the reset has already
3674          * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
3675          */
3676         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3677                 goto err_reset;
3678
3679         rtnl_lock();
3680
3681         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3682         if (ret)
3683                 goto err_reset_lock;
3684
3685         rtnl_unlock();
3686
3687         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3688         if (ret)
3689                 goto err_reset;
3690
3691         hdev->last_reset_time = jiffies;
3692         hdev->reset_fail_cnt = 0;
3693         hdev->rst_stats.reset_done_cnt++;
3694         ae_dev->reset_type = HNAE3_NONE_RESET;
3695
3696         /* if default_reset_request has a higher-level reset request,
3697          * it should be handled as soon as possible, since some errors
3698          * need this kind of reset to be fixed.
3699          */
3700         hdev->reset_level = hclge_get_reset_level(ae_dev,
3701                                                   &hdev->default_reset_request);
3702         if (hdev->reset_level != HNAE3_NONE_RESET)
3703                 set_bit(hdev->reset_level, &hdev->reset_request);
3704
3705         return;
3706
3707 err_reset_lock:
3708         rtnl_unlock();
3709 err_reset:
3710         if (hclge_reset_err_handle(hdev))
3711                 hclge_reset_task_schedule(hdev);
3712 }
3713
3714 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3715 {
3716         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3717         struct hclge_dev *hdev = ae_dev->priv;
3718
3719         /* We might end up getting called broadly because of the 2 cases below:
3720          * 1. A recoverable error was conveyed through APEI and the only way
3721          *    to restore normalcy is to reset.
3722          * 2. A new reset request from the stack due to a timeout.
3723          *
3724          * For the first case, the error event might not have an ae handle
3725          * available. Check if this is a new reset request and we are not here
3726          * just because the last reset attempt did not succeed and the
3727          * watchdog hit us again. We know this if the last reset request did
3728          * not occur very recently (watchdog timer = 5*HZ; check after a
3729          * sufficiently large time, say 4*5*HZ). For a new request we reset
3730          * the "reset level" to PF reset. And if it is a repeat of the most
3731          * recent reset request, we want to throttle it, so we will not allow
3732          * it again before 3*HZ has passed.
3733          */
3734         if (!handle)
3735                 handle = &hdev->vport[0].nic;
3736
3737         if (time_before(jiffies, (hdev->last_reset_time +
3738                                   HCLGE_RESET_INTERVAL))) {
3739                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3740                 return;
3741         } else if (hdev->default_reset_request) {
3742                 hdev->reset_level =
3743                         hclge_get_reset_level(ae_dev,
3744                                               &hdev->default_reset_request);
3745         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3746                 hdev->reset_level = HNAE3_FUNC_RESET;
3747         }
3748         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3749                  hdev->reset_level);
3750
3751         /* request reset & schedule reset task */
3752         set_bit(hdev->reset_level, &hdev->reset_request);
3753         hclge_reset_task_schedule(hdev);
3754
3755         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3756                 hdev->reset_level++;
3757 }
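/* Throttling sketch for the logic above: a request arriving within
 * HCLGE_RESET_INTERVAL of the last reset is not serviced immediately;
 * it only re-arms the reset timer, which calls back into
 * hclge_reset_event() once the interval has passed (see
 * hclge_reset_timer() below).
 */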
3758
3759 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3760                                         enum hnae3_reset_type rst_type)
3761 {
3762         struct hclge_dev *hdev = ae_dev->priv;
3763
3764         set_bit(rst_type, &hdev->default_reset_request);
3765 }
3766
3767 static void hclge_reset_timer(struct timer_list *t)
3768 {
3769         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3770
3771         /* if default_reset_request has no value, it means that this reset
3772          * request has already been handled, so just return here
3773          */
3774         if (!hdev->default_reset_request)
3775                 return;
3776
3777         dev_info(&hdev->pdev->dev,
3778                  "triggering reset in reset timer\n");
3779         hclge_reset_event(hdev->pdev, NULL);
3780 }
3781
3782 static void hclge_reset_subtask(struct hclge_dev *hdev)
3783 {
3784         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3785
3786         /* check if there is any ongoing reset in the hardware. This status
3787          * can be checked from reset_pending. If there is one, we need to wait
3788          * for the hardware to complete the reset.
3789          *    a. If we are able to figure out in reasonable time that the
3790          *       hardware has fully reset, we can proceed with the driver and
3791          *       client reset.
3792          *    b. Otherwise, we can come back later to check this status, so
3793          *       re-schedule now.
3794          */
3795         hdev->last_reset_time = jiffies;
3796         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3797         if (hdev->reset_type != HNAE3_NONE_RESET)
3798                 hclge_reset(hdev);
3799
3800         /* check if we got any *new* reset requests to be honored */
3801         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3802         if (hdev->reset_type != HNAE3_NONE_RESET)
3803                 hclge_do_reset(hdev);
3804
3805         hdev->reset_type = HNAE3_NONE_RESET;
3806 }
3807
3808 static void hclge_reset_service_task(struct work_struct *work)
3809 {
3810         struct hclge_dev *hdev =
3811                 container_of(work, struct hclge_dev, rst_service_task);
3812
3813         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3814                 return;
3815
3816         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3817
3818         hclge_reset_subtask(hdev);
3819
3820         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3821 }
3822
3823 static void hclge_mailbox_service_task(struct work_struct *work)
3824 {
3825         struct hclge_dev *hdev =
3826                 container_of(work, struct hclge_dev, mbx_service_task);
3827
3828         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3829                 return;
3830
3831         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3832
3833         hclge_mbx_handler(hdev);
3834
3835         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3836 }
3837
3838 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3839 {
3840         int i;
3841
3842         /* start from vport 1, since the PF is always alive */
3843         for (i = 1; i < hdev->num_alloc_vport; i++) {
3844                 struct hclge_vport *vport = &hdev->vport[i];
3845
3846                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3847                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3848
3849                 /* If the VF is not alive, set its MPS to the default value */
3850                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3851                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3852         }
3853 }
3854
3855 static void hclge_service_task(struct work_struct *work)
3856 {
3857         struct hclge_dev *hdev =
3858                 container_of(work, struct hclge_dev, service_task.work);
3859
3860         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3861
3862         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3863                 hclge_update_stats_for_all(hdev);
3864                 hdev->hw_stats.stats_timer = 0;
3865         }
3866
3867         hclge_update_port_info(hdev);
3868         hclge_update_link_status(hdev);
3869         hclge_update_vport_alive(hdev);
3870         hclge_sync_vlan_filter(hdev);
3871         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3872                 hclge_rfs_filter_expire(hdev);
3873                 hdev->fd_arfs_expire_timer = 0;
3874         }
3875
3876         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3877 }
3878
3879 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3880 {
3881         /* VF handle has no client */
3882         if (!handle->client)
3883                 return container_of(handle, struct hclge_vport, nic);
3884         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3885                 return container_of(handle, struct hclge_vport, roce);
3886         else
3887                 return container_of(handle, struct hclge_vport, nic);
3888 }
3889
3890 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3891                             struct hnae3_vector_info *vector_info)
3892 {
3893         struct hclge_vport *vport = hclge_get_vport(handle);
3894         struct hnae3_vector_info *vector = vector_info;
3895         struct hclge_dev *hdev = vport->back;
3896         int alloc = 0;
3897         int i, j;
3898
3899         vector_num = min(hdev->num_msi_left, vector_num);
3900
3901         for (j = 0; j < vector_num; j++) {
3902                 for (i = 1; i < hdev->num_msi; i++) {
3903                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3904                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3905                                 vector->io_addr = hdev->hw.io_base +
3906                                         HCLGE_VECTOR_REG_BASE +
3907                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3908                                         vport->vport_id *
3909                                         HCLGE_VECTOR_VF_OFFSET;
3910                                 hdev->vector_status[i] = vport->vport_id;
3911                                 hdev->vector_irq[i] = vector->vector;
3912
3913                                 vector++;
3914                                 alloc++;
3915
3916                                 break;
3917                         }
3918                 }
3919         }
3920         hdev->num_msi_left -= alloc;
3921         hdev->num_msi_used += alloc;
3922
3923         return alloc;
3924 }
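/* Register layout assumed by the io_addr computation above: vector 0 is
 * the misc vector, so data vectors start at i = 1 and each vport's
 * registers are HCLGE_VECTOR_VF_OFFSET apart, i.e.
 *
 *	io_addr(i) = io_base + HCLGE_VECTOR_REG_BASE
 *		     + (i - 1) * HCLGE_VECTOR_REG_OFFSET
 *		     + vport_id * HCLGE_VECTOR_VF_OFFSET
 */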
3925
3926 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3927 {
3928         int i;
3929
3930         for (i = 0; i < hdev->num_msi; i++)
3931                 if (vector == hdev->vector_irq[i])
3932                         return i;
3933
3934         return -EINVAL;
3935 }
3936
3937 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3938 {
3939         struct hclge_vport *vport = hclge_get_vport(handle);
3940         struct hclge_dev *hdev = vport->back;
3941         int vector_id;
3942
3943         vector_id = hclge_get_vector_index(hdev, vector);
3944         if (vector_id < 0) {
3945                 dev_err(&hdev->pdev->dev,
3946                         "Get vector index fail. vector_id =%d\n", vector_id);
3947                 return vector_id;
3948         }
3949
3950         hclge_free_vector(hdev, vector_id);
3951
3952         return 0;
3953 }
3954
3955 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3956 {
3957         return HCLGE_RSS_KEY_SIZE;
3958 }
3959
3960 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3961 {
3962         return HCLGE_RSS_IND_TBL_SIZE;
3963 }
3964
3965 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3966                                   const u8 hfunc, const u8 *key)
3967 {
3968         struct hclge_rss_config_cmd *req;
3969         unsigned int key_offset = 0;
3970         struct hclge_desc desc;
3971         int key_counts;
3972         int key_size;
3973         int ret;
3974
3975         key_counts = HCLGE_RSS_KEY_SIZE;
3976         req = (struct hclge_rss_config_cmd *)desc.data;
3977
3978         while (key_counts) {
3979                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3980                                            false);
3981
3982                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3983                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3984
3985                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3986                 memcpy(req->hash_key,
3987                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3988
3989                 key_counts -= key_size;
3990                 key_offset++;
3991                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3992                 if (ret) {
3993                         dev_err(&hdev->pdev->dev,
3994                                 "Configure RSS config fail, status = %d\n",
3995                                 ret);
3996                         return ret;
3997                 }
3998         }
3999         return 0;
4000 }
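/* The key is streamed to firmware in HCLGE_RSS_HASH_KEY_NUM-byte chunks,
 * one command descriptor per chunk. Assuming the usual 40-byte Toeplitz
 * key and 16-byte chunks (both defined elsewhere in this driver), that is
 * three commands carrying 16 + 16 + 8 bytes.
 */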
4001
4002 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4003 {
4004         struct hclge_rss_indirection_table_cmd *req;
4005         struct hclge_desc desc;
4006         int i, j;
4007         int ret;
4008
4009         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4010
4011         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4012                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
4013                                            false);
4014
4015                 req->start_table_index =
4016                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4017                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4018
4019                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4020                         req->rss_result[j] =
4021                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4022
4023                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4024                 if (ret) {
4025                         dev_err(&hdev->pdev->dev,
4026                                 "Configure rss indir table fail, status = %d\n",
4027                                 ret);
4028                         return ret;
4029                 }
4030         }
4031         return 0;
4032 }
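/* Likewise, the indirection table is pushed HCLGE_RSS_CFG_TBL_SIZE entries
 * per command over HCLGE_RSS_CFG_TBL_NUM commands; assuming a 512-entry
 * table written 16 entries at a time, that is 32 commands.
 */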
4033
4034 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4035                                  u16 *tc_size, u16 *tc_offset)
4036 {
4037         struct hclge_rss_tc_mode_cmd *req;
4038         struct hclge_desc desc;
4039         int ret;
4040         int i;
4041
4042         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4043         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4044
4045         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4046                 u16 mode = 0;
4047
4048                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4049                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4050                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4051                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4052                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4053
4054                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4055         }
4056
4057         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4058         if (ret)
4059                 dev_err(&hdev->pdev->dev,
4060                         "Configure rss tc mode fail, status = %d\n", ret);
4061
4062         return ret;
4063 }
4064
4065 static void hclge_get_rss_type(struct hclge_vport *vport)
4066 {
4067         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4068             vport->rss_tuple_sets.ipv4_udp_en ||
4069             vport->rss_tuple_sets.ipv4_sctp_en ||
4070             vport->rss_tuple_sets.ipv6_tcp_en ||
4071             vport->rss_tuple_sets.ipv6_udp_en ||
4072             vport->rss_tuple_sets.ipv6_sctp_en)
4073                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4074         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4075                  vport->rss_tuple_sets.ipv6_fragment_en)
4076                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4077         else
4078                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4079 }
4080
4081 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4082 {
4083         struct hclge_rss_input_tuple_cmd *req;
4084         struct hclge_desc desc;
4085         int ret;
4086
4087         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4088
4089         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4090
4091         /* Get the tuple cfg from the PF */
4092         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4093         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4094         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4095         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4096         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4097         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4098         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4099         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4100         hclge_get_rss_type(&hdev->vport[0]);
4101         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4102         if (ret)
4103                 dev_err(&hdev->pdev->dev,
4104                         "Configure rss input fail, status = %d\n", ret);
4105         return ret;
4106 }
4107
4108 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4109                          u8 *key, u8 *hfunc)
4110 {
4111         struct hclge_vport *vport = hclge_get_vport(handle);
4112         int i;
4113
4114         /* Get hash algorithm */
4115         if (hfunc) {
4116                 switch (vport->rss_algo) {
4117                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4118                         *hfunc = ETH_RSS_HASH_TOP;
4119                         break;
4120                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4121                         *hfunc = ETH_RSS_HASH_XOR;
4122                         break;
4123                 default:
4124                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4125                         break;
4126                 }
4127         }
4128
4129         /* Get the RSS Key required by the user */
4130         if (key)
4131                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4132
4133         /* Get indirect table */
4134         if (indir)
4135                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4136                         indir[i] =  vport->rss_indirection_tbl[i];
4137
4138         return 0;
4139 }
4140
4141 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4142                          const  u8 *key, const  u8 hfunc)
4143 {
4144         struct hclge_vport *vport = hclge_get_vport(handle);
4145         struct hclge_dev *hdev = vport->back;
4146         u8 hash_algo;
4147         int ret, i;
4148
4149         /* Set the RSS Hash Key if specified by the user */
4150         if (key) {
4151                 switch (hfunc) {
4152                 case ETH_RSS_HASH_TOP:
4153                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4154                         break;
4155                 case ETH_RSS_HASH_XOR:
4156                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4157                         break;
4158                 case ETH_RSS_HASH_NO_CHANGE:
4159                         hash_algo = vport->rss_algo;
4160                         break;
4161                 default:
4162                         return -EINVAL;
4163                 }
4164
4165                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4166                 if (ret)
4167                         return ret;
4168
4169                 /* Update the shadow RSS key with the user-specified key */
4170                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4171                 vport->rss_algo = hash_algo;
4172         }
4173
4174         /* Update the shadow RSS table with user specified qids */
4175         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4176                 vport->rss_indirection_tbl[i] = indir[i];
4177
4178         /* Update the hardware */
4179         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4180 }
4181
4182 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4183 {
4184         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4185
4186         if (nfc->data & RXH_L4_B_2_3)
4187                 hash_sets |= HCLGE_D_PORT_BIT;
4188         else
4189                 hash_sets &= ~HCLGE_D_PORT_BIT;
4190
4191         if (nfc->data & RXH_IP_SRC)
4192                 hash_sets |= HCLGE_S_IP_BIT;
4193         else
4194                 hash_sets &= ~HCLGE_S_IP_BIT;
4195
4196         if (nfc->data & RXH_IP_DST)
4197                 hash_sets |= HCLGE_D_IP_BIT;
4198         else
4199                 hash_sets &= ~HCLGE_D_IP_BIT;
4200
4201         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4202                 hash_sets |= HCLGE_V_TAG_BIT;
4203
4204         return hash_sets;
4205 }
4206
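     /* Select which packet fields feed the RSS hash for one flow type. For
      * example, a command like "ethtool -N eth0 rx-flow-hash tcp4 sdfn"
      * (eth0 is just a placeholder device) requests hashing on src/dst IP
      * and src/dst L4 port for IPv4 TCP.
      */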
4207 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4208                                struct ethtool_rxnfc *nfc)
4209 {
4210         struct hclge_vport *vport = hclge_get_vport(handle);
4211         struct hclge_dev *hdev = vport->back;
4212         struct hclge_rss_input_tuple_cmd *req;
4213         struct hclge_desc desc;
4214         u8 tuple_sets;
4215         int ret;
4216
4217         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4218                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4219                 return -EINVAL;
4220
4221         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4222         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4223
4224         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4225         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4226         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4227         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4228         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4229         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4230         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4231         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4232
4233         tuple_sets = hclge_get_rss_hash_bits(nfc);
4234         switch (nfc->flow_type) {
4235         case TCP_V4_FLOW:
4236                 req->ipv4_tcp_en = tuple_sets;
4237                 break;
4238         case TCP_V6_FLOW:
4239                 req->ipv6_tcp_en = tuple_sets;
4240                 break;
4241         case UDP_V4_FLOW:
4242                 req->ipv4_udp_en = tuple_sets;
4243                 break;
4244         case UDP_V6_FLOW:
4245                 req->ipv6_udp_en = tuple_sets;
4246                 break;
4247         case SCTP_V4_FLOW:
4248                 req->ipv4_sctp_en = tuple_sets;
4249                 break;
4250         case SCTP_V6_FLOW:
4251                 if ((nfc->data & RXH_L4_B_0_1) ||
4252                     (nfc->data & RXH_L4_B_2_3))
4253                         return -EINVAL;
4254
4255                 req->ipv6_sctp_en = tuple_sets;
4256                 break;
4257         case IPV4_FLOW:
4258                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4259                 break;
4260         case IPV6_FLOW:
4261                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4262                 break;
4263         default:
4264                 return -EINVAL;
4265         }
4266
4267         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4268         if (ret) {
4269                 dev_err(&hdev->pdev->dev,
4270                         "Set rss tuple fail, status = %d\n", ret);
4271                 return ret;
4272         }
4273
4274         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4275         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4276         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4277         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4278         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4279         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4280         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4281         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4282         hclge_get_rss_type(vport);
4283         return 0;
4284 }
4285
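     /* Report the currently enabled RSS hash fields for one flow type back
      * to ethtool via nfc->data.
      */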
4286 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4287                                struct ethtool_rxnfc *nfc)
4288 {
4289         struct hclge_vport *vport = hclge_get_vport(handle);
4290         u8 tuple_sets;
4291
4292         nfc->data = 0;
4293
4294         switch (nfc->flow_type) {
4295         case TCP_V4_FLOW:
4296                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4297                 break;
4298         case UDP_V4_FLOW:
4299                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4300                 break;
4301         case TCP_V6_FLOW:
4302                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4303                 break;
4304         case UDP_V6_FLOW:
4305                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4306                 break;
4307         case SCTP_V4_FLOW:
4308                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4309                 break;
4310         case SCTP_V6_FLOW:
4311                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4312                 break;
4313         case IPV4_FLOW:
4314         case IPV6_FLOW:
4315                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4316                 break;
4317         default:
4318                 return -EINVAL;
4319         }
4320
4321         if (!tuple_sets)
4322                 return 0;
4323
4324         if (tuple_sets & HCLGE_D_PORT_BIT)
4325                 nfc->data |= RXH_L4_B_2_3;
4326         if (tuple_sets & HCLGE_S_PORT_BIT)
4327                 nfc->data |= RXH_L4_B_0_1;
4328         if (tuple_sets & HCLGE_D_IP_BIT)
4329                 nfc->data |= RXH_IP_DST;
4330         if (tuple_sets & HCLGE_S_IP_BIT)
4331                 nfc->data |= RXH_IP_SRC;
4332
4333         return 0;
4334 }
4335
4336 static int hclge_get_tc_size(struct hnae3_handle *handle)
4337 {
4338         struct hclge_vport *vport = hclge_get_vport(handle);
4339         struct hclge_dev *hdev = vport->back;
4340
4341         return hdev->rss_size_max;
4342 }
4343
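     /* Flush the vport 0 shadow RSS configuration (indirection table, hash
      * key and algorithm, input tuples, TC mode) to the hardware.
      */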
4344 int hclge_rss_init_hw(struct hclge_dev *hdev)
4345 {
4346         struct hclge_vport *vport = hdev->vport;
4347         u8 *rss_indir = vport[0].rss_indirection_tbl;
4348         u16 rss_size = vport[0].alloc_rss_size;
4349         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4350         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4351         u8 *key = vport[0].rss_hash_key;
4352         u8 hfunc = vport[0].rss_algo;
4353         u16 tc_valid[HCLGE_MAX_TC_NUM];
4354         u16 roundup_size;
4355         unsigned int i;
4356         int ret;
4357
4358         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4359         if (ret)
4360                 return ret;
4361
4362         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4363         if (ret)
4364                 return ret;
4365
4366         ret = hclge_set_rss_input_tuple(hdev);
4367         if (ret)
4368                 return ret;
4369
4370         /* Each TC has the same queue size, and the tc_size set to hardware
4371          * is the log2 of the roundup power of two of rss_size; the actual
4372          * queue size is limited by the indirection table.
4373          */
4374         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4375                 dev_err(&hdev->pdev->dev,
4376                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4377                         rss_size);
4378                 return -EINVAL;
4379         }
4380
4381         roundup_size = roundup_pow_of_two(rss_size);
4382         roundup_size = ilog2(roundup_size);
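             /* e.g. rss_size = 24: roundup_pow_of_two(24) = 32 and
              * ilog2(32) = 5, so the tc_size written to hardware is 5,
              * while tc_offset advances in steps of the real rss_size (24).
              */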
4383
4384         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4385                 tc_valid[i] = 0;
4386
4387                 if (!(hdev->hw_tc_map & BIT(i)))
4388                         continue;
4389
4390                 tc_valid[i] = 1;
4391                 tc_size[i] = roundup_size;
4392                 tc_offset[i] = rss_size * i;
4393         }
4394
4395         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4396 }
4397
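     /* Spread queues evenly: entry i of each vport's indirection table is
      * mapped to queue i % alloc_rss_size.
      */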
4398 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4399 {
4400         struct hclge_vport *vport = hdev->vport;
4401         int i, j;
4402
4403         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4404                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4405                         vport[j].rss_indirection_tbl[i] =
4406                                 i % vport[j].alloc_rss_size;
4407         }
4408 }
4409
4410 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4411 {
4412         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4413         struct hclge_vport *vport = hdev->vport;
4414
4415         if (hdev->pdev->revision >= 0x21)
4416                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4417
4418         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4419                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4420                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4421                 vport[i].rss_tuple_sets.ipv4_udp_en =
4422                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4423                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4424                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4425                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4426                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4427                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4428                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4429                 vport[i].rss_tuple_sets.ipv6_udp_en =
4430                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4431                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4432                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4433                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4434                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4435
4436                 vport[i].rss_algo = rss_algo;
4437
4438                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4439                        HCLGE_RSS_KEY_SIZE);
4440         }
4441
4442         hclge_rss_indir_init_cfg(hdev);
4443 }
4444
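     /* Map (en == true) or unmap the rings in @ring_chain to/from interrupt
      * vector @vector_id, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring
      * entries per firmware command.
      */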
4445 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4446                                 int vector_id, bool en,
4447                                 struct hnae3_ring_chain_node *ring_chain)
4448 {
4449         struct hclge_dev *hdev = vport->back;
4450         struct hnae3_ring_chain_node *node;
4451         struct hclge_desc desc;
4452         struct hclge_ctrl_vector_chain_cmd *req =
4453                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4454         enum hclge_cmd_status status;
4455         enum hclge_opcode_type op;
4456         u16 tqp_type_and_id;
4457         int i;
4458
4459         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4460         hclge_cmd_setup_basic_desc(&desc, op, false);
4461         req->int_vector_id = vector_id;
4462
4463         i = 0;
4464         for (node = ring_chain; node; node = node->next) {
4465                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4466                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4467                                 HCLGE_INT_TYPE_S,
4468                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4469                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4470                                 HCLGE_TQP_ID_S, node->tqp_index);
4471                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4472                                 HCLGE_INT_GL_IDX_S,
4473                                 hnae3_get_field(node->int_gl_idx,
4474                                                 HNAE3_RING_GL_IDX_M,
4475                                                 HNAE3_RING_GL_IDX_S));
4476                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4477                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4478                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4479                         req->vfid = vport->vport_id;
4480
4481                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4482                         if (status) {
4483                                 dev_err(&hdev->pdev->dev,
4484                                         "Map TQP fail, status is %d.\n",
4485                                         status);
4486                                 return -EIO;
4487                         }
4488                         i = 0;
4489
4490                         hclge_cmd_setup_basic_desc(&desc,
4491                                                    op,
4492                                                    false);
4493                         req->int_vector_id = vector_id;
4494                 }
4495         }
4496
4497         if (i > 0) {
4498                 req->int_cause_num = i;
4499                 req->vfid = vport->vport_id;
4500                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4501                 if (status) {
4502                         dev_err(&hdev->pdev->dev,
4503                                 "Map TQP fail, status is %d.\n", status);
4504                         return -EIO;
4505                 }
4506         }
4507
4508         return 0;
4509 }
4510
4511 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4512                                     struct hnae3_ring_chain_node *ring_chain)
4513 {
4514         struct hclge_vport *vport = hclge_get_vport(handle);
4515         struct hclge_dev *hdev = vport->back;
4516         int vector_id;
4517
4518         vector_id = hclge_get_vector_index(hdev, vector);
4519         if (vector_id < 0) {
4520                 dev_err(&hdev->pdev->dev,
4521                         "Get vector index fail. vector_id = %d\n", vector_id);
4522                 return vector_id;
4523         }
4524
4525         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4526 }
4527
4528 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4529                                        struct hnae3_ring_chain_node *ring_chain)
4530 {
4531         struct hclge_vport *vport = hclge_get_vport(handle);
4532         struct hclge_dev *hdev = vport->back;
4533         int vector_id, ret;
4534
4535         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4536                 return 0;
4537
4538         vector_id = hclge_get_vector_index(hdev, vector);
4539         if (vector_id < 0) {
4540                 dev_err(&handle->pdev->dev,
4541                         "Get vector index fail. ret = %d\n", vector_id);
4542                 return vector_id;
4543         }
4544
4545         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4546         if (ret)
4547                 dev_err(&handle->pdev->dev,
4548                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
4549                         vector_id, ret);
4550
4551         return ret;
4552 }
4553
4554 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4555                                struct hclge_promisc_param *param)
4556 {
4557         struct hclge_promisc_cfg_cmd *req;
4558         struct hclge_desc desc;
4559         int ret;
4560
4561         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4562
4563         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4564         req->vf_id = param->vf_id;
4565
4566         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4567          * on pdev revision 0x20; newer revisions support them. Setting
4568          * these two fields does not cause an error when the driver
4569          * sends the command to the firmware on revision 0x20.
4570          */
4571         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4572                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4573
4574         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4575         if (ret)
4576                 dev_err(&hdev->pdev->dev,
4577                         "Set promisc mode fail, status is %d.\n", ret);
4578
4579         return ret;
4580 }
4581
4582 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4583                               bool en_mc, bool en_bc, int vport_id)
4584 {
4585         if (!param)
4586                 return;
4587
4588         memset(param, 0, sizeof(struct hclge_promisc_param));
4589         if (en_uc)
4590                 param->enable = HCLGE_PROMISC_EN_UC;
4591         if (en_mc)
4592                 param->enable |= HCLGE_PROMISC_EN_MC;
4593         if (en_bc)
4594                 param->enable |= HCLGE_PROMISC_EN_BC;
4595         param->vf_id = vport_id;
4596 }
4597
4598 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4599                                   bool en_mc_pmc)
4600 {
4601         struct hclge_vport *vport = hclge_get_vport(handle);
4602         struct hclge_dev *hdev = vport->back;
4603         struct hclge_promisc_param param;
4604         bool en_bc_pmc = true;
4605
4606         /* For revision 0x20, if broadcast promisc is enabled, the vlan
4607          * filter is always bypassed, so broadcast promisc should stay
4608          * disabled until the user enables promisc mode
4609          */
4610         if (handle->pdev->revision == 0x20)
4611                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4612
4613         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4614                                  vport->vport_id);
4615         return hclge_cmd_set_promisc_mode(hdev, &param);
4616 }
4617
4618 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4619 {
4620         struct hclge_get_fd_mode_cmd *req;
4621         struct hclge_desc desc;
4622         int ret;
4623
4624         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4625
4626         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4627
4628         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4629         if (ret) {
4630                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4631                 return ret;
4632         }
4633
4634         *fd_mode = req->mode;
4635
4636         return ret;
4637 }
4638
4639 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4640                                    u32 *stage1_entry_num,
4641                                    u32 *stage2_entry_num,
4642                                    u16 *stage1_counter_num,
4643                                    u16 *stage2_counter_num)
4644 {
4645         struct hclge_get_fd_allocation_cmd *req;
4646         struct hclge_desc desc;
4647         int ret;
4648
4649         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4650
4651         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4652
4653         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4654         if (ret) {
4655                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4656                         ret);
4657                 return ret;
4658         }
4659
4660         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4661         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4662         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4663         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4664
4665         return ret;
4666 }
4667
4668 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4669 {
4670         struct hclge_set_fd_key_config_cmd *req;
4671         struct hclge_fd_key_cfg *stage;
4672         struct hclge_desc desc;
4673         int ret;
4674
4675         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4676
4677         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4678         stage = &hdev->fd_cfg.key_cfg[stage_num];
4679         req->stage = stage_num;
4680         req->key_select = stage->key_sel;
4681         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4682         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4683         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4684         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4685         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4686         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4687
4688         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4689         if (ret)
4690                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4691
4692         return ret;
4693 }
4694
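     /* Query the flow director mode and entry/counter allocation from the
      * firmware, then build and commit the stage-1 key configuration.
      */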
4695 static int hclge_init_fd_config(struct hclge_dev *hdev)
4696 {
4697 #define LOW_2_WORDS             0x03
4698         struct hclge_fd_key_cfg *key_cfg;
4699         int ret;
4700
4701         if (!hnae3_dev_fd_supported(hdev))
4702                 return 0;
4703
4704         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4705         if (ret)
4706                 return ret;
4707
4708         switch (hdev->fd_cfg.fd_mode) {
4709         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4710                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4711                 break;
4712         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4713                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4714                 break;
4715         default:
4716                 dev_err(&hdev->pdev->dev,
4717                         "Unsupported flow director mode %d\n",
4718                         hdev->fd_cfg.fd_mode);
4719                 return -EOPNOTSUPP;
4720         }
4721
4722         hdev->fd_cfg.proto_support =
4723                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4724                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4725         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4726         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4727         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4728         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4729         key_cfg->outer_sipv6_word_en = 0;
4730         key_cfg->outer_dipv6_word_en = 0;
4731
4732         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4733                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4734                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4735                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4736
4737         /* If using the max 400-bit key, tuples for ether type are also supported */
4738         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4739                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4740                 key_cfg->tuple_active |=
4741                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4742         }
4743
4744         /* roce_type is used to filter roce frames
4745          * dst_vport is used to match a rule to a specific vport
4746          */
4747         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4748
4749         ret = hclge_get_fd_allocation(hdev,
4750                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4751                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4752                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4753                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4754         if (ret)
4755                 return ret;
4756
4757         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4758 }
4759
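     /* Program one TCAM entry: the key is spread over three command
      * descriptors, and sel_x chooses whether the X or Y part of the
      * ternary key is written.
      */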
4760 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4761                                 int loc, u8 *key, bool is_add)
4762 {
4763         struct hclge_fd_tcam_config_1_cmd *req1;
4764         struct hclge_fd_tcam_config_2_cmd *req2;
4765         struct hclge_fd_tcam_config_3_cmd *req3;
4766         struct hclge_desc desc[3];
4767         int ret;
4768
4769         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4770         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4771         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4772         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4773         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4774
4775         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4776         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4777         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4778
4779         req1->stage = stage;
4780         req1->xy_sel = sel_x ? 1 : 0;
4781         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4782         req1->index = cpu_to_le32(loc);
4783         req1->entry_vld = sel_x ? is_add : 0;
4784
4785         if (key) {
4786                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4787                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4788                        sizeof(req2->tcam_data));
4789                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4790                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4791         }
4792
4793         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4794         if (ret)
4795                 dev_err(&hdev->pdev->dev,
4796                         "config tcam key fail, ret=%d\n",
4797                         ret);
4798
4799         return ret;
4800 }
4801
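     /* Write the action data (drop or forward to a queue, counter usage,
      * rule id reporting) for the TCAM entry at @loc.
      */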
4802 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4803                               struct hclge_fd_ad_data *action)
4804 {
4805         struct hclge_fd_ad_config_cmd *req;
4806         struct hclge_desc desc;
4807         u64 ad_data = 0;
4808         int ret;
4809
4810         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4811
4812         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4813         req->index = cpu_to_le32(loc);
4814         req->stage = stage;
4815
4816         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4817                       action->write_rule_id_to_bd);
4818         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4819                         action->rule_id);
4820         ad_data <<= 32;
4821         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4822         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4823                       action->forward_to_direct_queue);
4824         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4825                         action->queue_id);
4826         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4827         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4828                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4829         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4830         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4831                         action->next_input_key);
4832
4833         req->ad_data = cpu_to_le64(ad_data);
4834         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4835         if (ret)
4836                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4837
4838         return ret;
4839 }
4840
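     /* Convert one tuple of the rule into the X/Y ternary key encoding.
      * Returns true if the tuple occupies space in the key (the caller must
      * advance its key cursor), false if the tuple is not in the key layout.
      */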
4841 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4842                                    struct hclge_fd_rule *rule)
4843 {
4844         u16 tmp_x_s, tmp_y_s;
4845         u32 tmp_x_l, tmp_y_l;
4846         int i;
4847
4848         if (rule->unused_tuple & tuple_bit)
4849                 return true;
4850
4851         switch (tuple_bit) {
4852         case 0:
4853                 return false;
4854         case BIT(INNER_DST_MAC):
4855                 for (i = 0; i < ETH_ALEN; i++) {
4856                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4857                                rule->tuples_mask.dst_mac[i]);
4858                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4859                                rule->tuples_mask.dst_mac[i]);
4860                 }
4861
4862                 return true;
4863         case BIT(INNER_SRC_MAC):
4864                 for (i = 0; i < ETH_ALEN; i++) {
4865                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4866                                rule->tuples_mask.src_mac[i]);
4867                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4868                                rule->tuples_mask.src_mac[i]);
4869                 }
4870
4871                 return true;
4872         case BIT(INNER_VLAN_TAG_FST):
4873                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4874                        rule->tuples_mask.vlan_tag1);
4875                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4876                        rule->tuples_mask.vlan_tag1);
4877                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4878                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4879
4880                 return true;
4881         case BIT(INNER_ETH_TYPE):
4882                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4883                        rule->tuples_mask.ether_proto);
4884                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4885                        rule->tuples_mask.ether_proto);
4886                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4887                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4888
4889                 return true;
4890         case BIT(INNER_IP_TOS):
4891                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4892                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4893
4894                 return true;
4895         case BIT(INNER_IP_PROTO):
4896                 calc_x(*key_x, rule->tuples.ip_proto,
4897                        rule->tuples_mask.ip_proto);
4898                 calc_y(*key_y, rule->tuples.ip_proto,
4899                        rule->tuples_mask.ip_proto);
4900
4901                 return true;
4902         case BIT(INNER_SRC_IP):
4903                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4904                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4905                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4906                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4907                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4908                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4909
4910                 return true;
4911         case BIT(INNER_DST_IP):
4912                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4913                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4914                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4915                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4916                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4917                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4918
4919                 return true;
4920         case BIT(INNER_SRC_PORT):
4921                 calc_x(tmp_x_s, rule->tuples.src_port,
4922                        rule->tuples_mask.src_port);
4923                 calc_y(tmp_y_s, rule->tuples.src_port,
4924                        rule->tuples_mask.src_port);
4925                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4926                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4927
4928                 return true;
4929         case BIT(INNER_DST_PORT):
4930                 calc_x(tmp_x_s, rule->tuples.dst_port,
4931                        rule->tuples_mask.dst_port);
4932                 calc_y(tmp_y_s, rule->tuples.dst_port,
4933                        rule->tuples_mask.dst_port);
4934                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4935                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4936
4937                 return true;
4938         default:
4939                 return false;
4940         }
4941 }
4942
4943 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4944                                  u8 vf_id, u8 network_port_id)
4945 {
4946         u32 port_number = 0;
4947
4948         if (port_type == HOST_PORT) {
4949                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4950                                 pf_id);
4951                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4952                                 vf_id);
4953                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4954         } else {
4955                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4956                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4957                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4958         }
4959
4960         return port_number;
4961 }
4962
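     /* Pack the active meta data fields (packet type and destination vport)
      * into one 32-bit word and shift them up against the MSB end, where the
      * meta data region of the key lives.
      */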
4963 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4964                                        __le32 *key_x, __le32 *key_y,
4965                                        struct hclge_fd_rule *rule)
4966 {
4967         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4968         u8 cur_pos = 0, tuple_size, shift_bits;
4969         unsigned int i;
4970
4971         for (i = 0; i < MAX_META_DATA; i++) {
4972                 tuple_size = meta_data_key_info[i].key_length;
4973                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4974
4975                 switch (tuple_bit) {
4976                 case BIT(ROCE_TYPE):
4977                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4978                         cur_pos += tuple_size;
4979                         break;
4980                 case BIT(DST_VPORT):
4981                         port_number = hclge_get_port_number(HOST_PORT, 0,
4982                                                             rule->vf_id, 0);
4983                         hnae3_set_field(meta_data,
4984                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4985                                         cur_pos, port_number);
4986                         cur_pos += tuple_size;
4987                         break;
4988                 default:
4989                         break;
4990                 }
4991         }
4992
4993         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4994         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4995         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4996
4997         *key_x = cpu_to_le32(tmp_x << shift_bits);
4998         *key_y = cpu_to_le32(tmp_y << shift_bits);
4999 }
5000
5001 /* A complete key is the combination of a meta data key and a tuple key.
5002  * The meta data key is stored in the MSB region, the tuple key in the
5003  * LSB region, and unused bits are filled with 0.
5004  */
5005 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5006                             struct hclge_fd_rule *rule)
5007 {
5008         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5009         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5010         u8 *cur_key_x, *cur_key_y;
5011         unsigned int i;
5012         int ret, tuple_size;
5013         u8 meta_data_region;
5014
5015         memset(key_x, 0, sizeof(key_x));
5016         memset(key_y, 0, sizeof(key_y));
5017         cur_key_x = key_x;
5018         cur_key_y = key_y;
5019
5020         for (i = 0; i < MAX_TUPLE; i++) {
5021                 bool tuple_valid;
5022                 u32 check_tuple;
5023
5024                 tuple_size = tuple_key_info[i].key_length / 8;
5025                 check_tuple = key_cfg->tuple_active & BIT(i);
5026
5027                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5028                                                      cur_key_y, rule);
5029                 if (tuple_valid) {
5030                         cur_key_x += tuple_size;
5031                         cur_key_y += tuple_size;
5032                 }
5033         }
5034
5035         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5036                         MAX_META_DATA_LENGTH / 8;
5037
5038         hclge_fd_convert_meta_data(key_cfg,
5039                                    (__le32 *)(key_x + meta_data_region),
5040                                    (__le32 *)(key_y + meta_data_region),
5041                                    rule);
5042
5043         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5044                                    true);
5045         if (ret) {
5046                 dev_err(&hdev->pdev->dev,
5047                         "fd key_y config fail, loc=%d, ret=%d\n",
5048                         rule->location, ret);
5049                 return ret;
5050         }
5051
5052         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5053                                    true);
5054         if (ret)
5055                 dev_err(&hdev->pdev->dev,
5056                         "fd key_x config fail, loc=%d, ret=%d\n",
5057                         rule->location, ret);
5058         return ret;
5059 }
5060
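     /* Build the action data for a rule: either drop matching packets or
      * forward them to rule->queue_id, and request that the rule id be
      * written to the packet BD (write_rule_id_to_bd).
      */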
5061 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5062                                struct hclge_fd_rule *rule)
5063 {
5064         struct hclge_fd_ad_data ad_data;
5065
5066         ad_data.ad_id = rule->location;
5067
5068         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5069                 ad_data.drop_packet = true;
5070                 ad_data.forward_to_direct_queue = false;
5071                 ad_data.queue_id = 0;
5072         } else {
5073                 ad_data.drop_packet = false;
5074                 ad_data.forward_to_direct_queue = true;
5075                 ad_data.queue_id = rule->queue_id;
5076         }
5077
5078         ad_data.use_counter = false;
5079         ad_data.counter_id = 0;
5080
5081         ad_data.use_next_stage = false;
5082         ad_data.next_input_key = 0;
5083
5084         ad_data.write_rule_id_to_bd = true;
5085         ad_data.rule_id = rule->location;
5086
5087         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5088 }
5089
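     /* Validate an ethtool flow spec against the flow director capabilities
      * and collect the tuples the spec leaves unspecified into *unused.
      */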
5090 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5091                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5092 {
5093         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5094         struct ethtool_usrip4_spec *usr_ip4_spec;
5095         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5096         struct ethtool_usrip6_spec *usr_ip6_spec;
5097         struct ethhdr *ether_spec;
5098
5099         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5100                 return -EINVAL;
5101
5102         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5103                 return -EOPNOTSUPP;
5104
5105         if ((fs->flow_type & FLOW_EXT) &&
5106             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5107                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5108                 return -EOPNOTSUPP;
5109         }
5110
5111         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5112         case SCTP_V4_FLOW:
5113         case TCP_V4_FLOW:
5114         case UDP_V4_FLOW:
5115                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5116                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5117
5118                 if (!tcp_ip4_spec->ip4src)
5119                         *unused |= BIT(INNER_SRC_IP);
5120
5121                 if (!tcp_ip4_spec->ip4dst)
5122                         *unused |= BIT(INNER_DST_IP);
5123
5124                 if (!tcp_ip4_spec->psrc)
5125                         *unused |= BIT(INNER_SRC_PORT);
5126
5127                 if (!tcp_ip4_spec->pdst)
5128                         *unused |= BIT(INNER_DST_PORT);
5129
5130                 if (!tcp_ip4_spec->tos)
5131                         *unused |= BIT(INNER_IP_TOS);
5132
5133                 break;
5134         case IP_USER_FLOW:
5135                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5136                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5137                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5138
5139                 if (!usr_ip4_spec->ip4src)
5140                         *unused |= BIT(INNER_SRC_IP);
5141
5142                 if (!usr_ip4_spec->ip4dst)
5143                         *unused |= BIT(INNER_DST_IP);
5144
5145                 if (!usr_ip4_spec->tos)
5146                         *unused |= BIT(INNER_IP_TOS);
5147
5148                 if (!usr_ip4_spec->proto)
5149                         *unused |= BIT(INNER_IP_PROTO);
5150
5151                 if (usr_ip4_spec->l4_4_bytes)
5152                         return -EOPNOTSUPP;
5153
5154                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5155                         return -EOPNOTSUPP;
5156
5157                 break;
5158         case SCTP_V6_FLOW:
5159         case TCP_V6_FLOW:
5160         case UDP_V6_FLOW:
5161                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5162                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5163                         BIT(INNER_IP_TOS);
5164
5165                 /* check whether the src/dst ip address is used */
5166                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5167                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5168                         *unused |= BIT(INNER_SRC_IP);
5169
5170                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5171                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5172                         *unused |= BIT(INNER_DST_IP);
5173
5174                 if (!tcp_ip6_spec->psrc)
5175                         *unused |= BIT(INNER_SRC_PORT);
5176
5177                 if (!tcp_ip6_spec->pdst)
5178                         *unused |= BIT(INNER_DST_PORT);
5179
5180                 if (tcp_ip6_spec->tclass)
5181                         return -EOPNOTSUPP;
5182
5183                 break;
5184         case IPV6_USER_FLOW:
5185                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5186                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5187                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5188                         BIT(INNER_DST_PORT);
5189
5190                 /* check whether the src/dst ip address is used */
5191                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5192                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5193                         *unused |= BIT(INNER_SRC_IP);
5194
5195                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5196                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5197                         *unused |= BIT(INNER_DST_IP);
5198
5199                 if (!usr_ip6_spec->l4_proto)
5200                         *unused |= BIT(INNER_IP_PROTO);
5201
5202                 if (usr_ip6_spec->tclass)
5203                         return -EOPNOTSUPP;
5204
5205                 if (usr_ip6_spec->l4_4_bytes)
5206                         return -EOPNOTSUPP;
5207
5208                 break;
5209         case ETHER_FLOW:
5210                 ether_spec = &fs->h_u.ether_spec;
5211                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5212                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5213                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5214
5215                 if (is_zero_ether_addr(ether_spec->h_source))
5216                         *unused |= BIT(INNER_SRC_MAC);
5217
5218                 if (is_zero_ether_addr(ether_spec->h_dest))
5219                         *unused |= BIT(INNER_DST_MAC);
5220
5221                 if (!ether_spec->h_proto)
5222                         *unused |= BIT(INNER_ETH_TYPE);
5223
5224                 break;
5225         default:
5226                 return -EOPNOTSUPP;
5227         }
5228
5229         if (fs->flow_type & FLOW_EXT) {
5230                 if (fs->h_ext.vlan_etype)
5231                         return -EOPNOTSUPP;
5232                 if (!fs->h_ext.vlan_tci)
5233                         *unused |= BIT(INNER_VLAN_TAG_FST);
5234
5235                 if (fs->m_ext.vlan_tci) {
5236                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5237                                 return -EINVAL;
5238                 }
5239         } else {
5240                 *unused |= BIT(INNER_VLAN_TAG_FST);
5241         }
5242
5243         if (fs->flow_type & FLOW_MAC_EXT) {
5244                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5245                         return -EOPNOTSUPP;
5246
5247                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5248                         *unused |= BIT(INNER_DST_MAC);
5249                 else
5250                         *unused &= ~(BIT(INNER_DST_MAC));
5251         }
5252
5253         return 0;
5254 }
5255
5256 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5257 {
5258         struct hclge_fd_rule *rule = NULL;
5259         struct hlist_node *node2;
5260
5261         spin_lock_bh(&hdev->fd_rule_lock);
5262         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5263                 if (rule->location >= location)
5264                         break;
5265         }
5266
5267         spin_unlock_bh(&hdev->fd_rule_lock);
5268
5269         return rule && rule->location == location;
5270 }
5271
5272 /* the caller must hold fd_rule_lock */
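     /* The rule list is kept sorted by location: find the first rule at or
      * after @location, delete it if it already exists, and (for is_add)
      * insert the new rule behind its predecessor.
      */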
5273 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5274                                      struct hclge_fd_rule *new_rule,
5275                                      u16 location,
5276                                      bool is_add)
5277 {
5278         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5279         struct hlist_node *node2;
5280
5281         if (is_add && !new_rule)
5282                 return -EINVAL;
5283
5284         hlist_for_each_entry_safe(rule, node2,
5285                                   &hdev->fd_rule_list, rule_node) {
5286                 if (rule->location >= location)
5287                         break;
5288                 parent = rule;
5289         }
5290
5291         if (rule && rule->location == location) {
5292                 hlist_del(&rule->rule_node);
5293                 kfree(rule);
5294                 hdev->hclge_fd_rule_num--;
5295
5296                 if (!is_add) {
5297                         if (!hdev->hclge_fd_rule_num)
5298                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5299                         clear_bit(location, hdev->fd_bmap);
5300
5301                         return 0;
5302                 }
5303         } else if (!is_add) {
5304                 dev_err(&hdev->pdev->dev,
5305                         "delete fail, rule %d does not exist\n",
5306                         location);
5307                 return -EINVAL;
5308         }
5309
5310         INIT_HLIST_NODE(&new_rule->rule_node);
5311
5312         if (parent)
5313                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5314         else
5315                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5316
5317         set_bit(location, hdev->fd_bmap);
5318         hdev->hclge_fd_rule_num++;
5319         hdev->fd_active_type = new_rule->rule_type;
5320
5321         return 0;
5322 }
5323
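     /* Copy the match values and masks from the ethtool flow spec into the
      * rule's tuples/tuples_mask, converting from big endian to host order.
      */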
5324 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5325                               struct ethtool_rx_flow_spec *fs,
5326                               struct hclge_fd_rule *rule)
5327 {
5328         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5329
5330         switch (flow_type) {
5331         case SCTP_V4_FLOW:
5332         case TCP_V4_FLOW:
5333         case UDP_V4_FLOW:
5334                 rule->tuples.src_ip[IPV4_INDEX] =
5335                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5336                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5337                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5338
5339                 rule->tuples.dst_ip[IPV4_INDEX] =
5340                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5341                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5342                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5343
5344                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5345                 rule->tuples_mask.src_port =
5346                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5347
5348                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5349                 rule->tuples_mask.dst_port =
5350                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5351
5352                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5353                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5354
5355                 rule->tuples.ether_proto = ETH_P_IP;
5356                 rule->tuples_mask.ether_proto = 0xFFFF;
5357
5358                 break;
5359         case IP_USER_FLOW:
5360                 rule->tuples.src_ip[IPV4_INDEX] =
5361                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5362                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5363                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5364
5365                 rule->tuples.dst_ip[IPV4_INDEX] =
5366                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5367                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5368                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5369
5370                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5371                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5372
5373                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5374                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5375
5376                 rule->tuples.ether_proto = ETH_P_IP;
5377                 rule->tuples_mask.ether_proto = 0xFFFF;
5378
5379                 break;
5380         case SCTP_V6_FLOW:
5381         case TCP_V6_FLOW:
5382         case UDP_V6_FLOW:
5383                 be32_to_cpu_array(rule->tuples.src_ip,
5384                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5385                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5386                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5387
5388                 be32_to_cpu_array(rule->tuples.dst_ip,
5389                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5390                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5391                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5392
5393                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5394                 rule->tuples_mask.src_port =
5395                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5396
5397                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5398                 rule->tuples_mask.dst_port =
5399                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5400
5401                 rule->tuples.ether_proto = ETH_P_IPV6;
5402                 rule->tuples_mask.ether_proto = 0xFFFF;
5403
5404                 break;
5405         case IPV6_USER_FLOW:
5406                 be32_to_cpu_array(rule->tuples.src_ip,
5407                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5408                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5409                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5410
5411                 be32_to_cpu_array(rule->tuples.dst_ip,
5412                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5413                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5414                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5415
5416                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5417                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5418
5419                 rule->tuples.ether_proto = ETH_P_IPV6;
5420                 rule->tuples_mask.ether_proto = 0xFFFF;
5421
5422                 break;
5423         case ETHER_FLOW:
5424                 ether_addr_copy(rule->tuples.src_mac,
5425                                 fs->h_u.ether_spec.h_source);
5426                 ether_addr_copy(rule->tuples_mask.src_mac,
5427                                 fs->m_u.ether_spec.h_source);
5428
5429                 ether_addr_copy(rule->tuples.dst_mac,
5430                                 fs->h_u.ether_spec.h_dest);
5431                 ether_addr_copy(rule->tuples_mask.dst_mac,
5432                                 fs->m_u.ether_spec.h_dest);
5433
5434                 rule->tuples.ether_proto =
5435                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5436                 rule->tuples_mask.ether_proto =
5437                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5438
5439                 break;
5440         default:
5441                 return -EOPNOTSUPP;
5442         }
5443
5444         switch (flow_type) {
5445         case SCTP_V4_FLOW:
5446         case SCTP_V6_FLOW:
5447                 rule->tuples.ip_proto = IPPROTO_SCTP;
5448                 rule->tuples_mask.ip_proto = 0xFF;
5449                 break;
5450         case TCP_V4_FLOW:
5451         case TCP_V6_FLOW:
5452                 rule->tuples.ip_proto = IPPROTO_TCP;
5453                 rule->tuples_mask.ip_proto = 0xFF;
5454                 break;
5455         case UDP_V4_FLOW:
5456         case UDP_V6_FLOW:
5457                 rule->tuples.ip_proto = IPPROTO_UDP;
5458                 rule->tuples_mask.ip_proto = 0xFF;
5459                 break;
5460         default:
5461                 break;
5462         }
5463
5464         if (fs->flow_type & FLOW_EXT) {
5465                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5466                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5467         }
5468
5469         if (fs->flow_type & FLOW_MAC_EXT) {
5470                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5471                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5472         }
5473
5474         return 0;
5475 }
5476
5477 /* the caller must hold fd_rule_lock */
5478 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5479                                 struct hclge_fd_rule *rule)
5480 {
5481         int ret;
5482
5483         if (!rule) {
5484                 dev_err(&hdev->pdev->dev,
5485                         "The flow director rule is NULL\n");
5486                 return -EINVAL;
5487         }
5488
5489         /* it never fails here, so there is no need to check the return value */
5490         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5491
5492         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5493         if (ret)
5494                 goto clear_rule;
5495
5496         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5497         if (ret)
5498                 goto clear_rule;
5499
5500         return 0;
5501
5502 clear_rule:
5503         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5504         return ret;
5505 }
5506
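     /* Entry point for ethtool ntuple rule insertion. For example, a command
      * like "ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 3"
      * (eth0 is just a placeholder device) lands here to validate the spec
      * and program the rule into the stage-1 TCAM.
      */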
5507 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5508                               struct ethtool_rxnfc *cmd)
5509 {
5510         struct hclge_vport *vport = hclge_get_vport(handle);
5511         struct hclge_dev *hdev = vport->back;
5512         u16 dst_vport_id = 0, q_index = 0;
5513         struct ethtool_rx_flow_spec *fs;
5514         struct hclge_fd_rule *rule;
5515         u32 unused = 0;
5516         u8 action;
5517         int ret;
5518
5519         if (!hnae3_dev_fd_supported(hdev))
5520                 return -EOPNOTSUPP;
5521
5522         if (!hdev->fd_en) {
5523                 dev_warn(&hdev->pdev->dev,
5524                          "Please enable flow director first\n");
5525                 return -EOPNOTSUPP;
5526         }
5527
5528         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5529
5530         ret = hclge_fd_check_spec(hdev, fs, &unused);
5531         if (ret) {
5532                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5533                 return ret;
5534         }
5535
5536         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5537                 action = HCLGE_FD_ACTION_DROP_PACKET;
5538         } else {
5539                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5540                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5541                 u16 tqps;
5542
5543                 if (vf > hdev->num_req_vfs) {
5544                         dev_err(&hdev->pdev->dev,
5545                                 "Error: vf id (%d) > max vf num (%d)\n",
5546                                 vf, hdev->num_req_vfs);
5547                         return -EINVAL;
5548                 }
5549
5550                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5551                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5552
5553                 if (ring >= tqps) {
5554                         dev_err(&hdev->pdev->dev,
5555                                 "Error: queue id (%d) > max tqp num (%d)\n",
5556                                 ring, tqps - 1);
5557                         return -EINVAL;
5558                 }
5559
5560                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5561                 q_index = ring;
5562         }
5563
5564         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5565         if (!rule)
5566                 return -ENOMEM;
5567
5568         ret = hclge_fd_get_tuple(hdev, fs, rule);
5569         if (ret) {
5570                 kfree(rule);
5571                 return ret;
5572         }
5573
5574         rule->flow_type = fs->flow_type;
5575
5576         rule->location = fs->location;
5577         rule->unused_tuple = unused;
5578         rule->vf_id = dst_vport_id;
5579         rule->queue_id = q_index;
5580         rule->action = action;
5581         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5582
5583         /* to avoid rule conflicts, clear all aRFS rules when the user
5584          * configures a rule via ethtool
5585          */
5586         hclge_clear_arfs_rules(handle);
5587
5588         spin_lock_bh(&hdev->fd_rule_lock);
5589         ret = hclge_fd_config_rule(hdev, rule);
5590
5591         spin_unlock_bh(&hdev->fd_rule_lock);
5592
5593         return ret;
5594 }
5595
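/* hclge_del_fd_entry - remove a flow director rule by location (an
 * ETHTOOL_SRXCLSRLDEL request), e.g. "ethtool -N eth0 delete 1" with an
 * illustrative device name.
 */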
5596 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5597                               struct ethtool_rxnfc *cmd)
5598 {
5599         struct hclge_vport *vport = hclge_get_vport(handle);
5600         struct hclge_dev *hdev = vport->back;
5601         struct ethtool_rx_flow_spec *fs;
5602         int ret;
5603
5604         if (!hnae3_dev_fd_supported(hdev))
5605                 return -EOPNOTSUPP;
5606
5607         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5608
5609         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5610                 return -EINVAL;
5611
5612         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5613                 dev_err(&hdev->pdev->dev,
5614                         "Delete fail, rule %d is inexistent\n", fs->location);
5615                 return -ENOENT;
5616         }
5617
5618         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5619                                    NULL, false);
5620         if (ret)
5621                 return ret;
5622
5623         spin_lock_bh(&hdev->fd_rule_lock);
5624         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5625
5626         spin_unlock_bh(&hdev->fd_rule_lock);
5627
5628         return ret;
5629 }
5630
5631 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5632                                      bool clear_list)
5633 {
5634         struct hclge_vport *vport = hclge_get_vport(handle);
5635         struct hclge_dev *hdev = vport->back;
5636         struct hclge_fd_rule *rule;
5637         struct hlist_node *node;
5638         u16 location;
5639
5640         if (!hnae3_dev_fd_supported(hdev))
5641                 return;
5642
5643         spin_lock_bh(&hdev->fd_rule_lock);
5644         for_each_set_bit(location, hdev->fd_bmap,
5645                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5646                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5647                                      NULL, false);
5648
5649         if (clear_list) {
5650                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5651                                           rule_node) {
5652                         hlist_del(&rule->rule_node);
5653                         kfree(rule);
5654                 }
5655                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5656                 hdev->hclge_fd_rule_num = 0;
5657                 bitmap_zero(hdev->fd_bmap,
5658                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5659         }
5660
5661         spin_unlock_bh(&hdev->fd_rule_lock);
5662 }
5663
5664 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5665 {
5666         struct hclge_vport *vport = hclge_get_vport(handle);
5667         struct hclge_dev *hdev = vport->back;
5668         struct hclge_fd_rule *rule;
5669         struct hlist_node *node;
5670         int ret;
5671
5672         /* Return 0 here, because the reset error handling checks this
5673          * return value; if an error were returned here, the reset process
5674          * would fail.
5675          */
5676         if (!hnae3_dev_fd_supported(hdev))
5677                 return 0;
5678
5679         /* if fd is disabled, the rules should not be restored during reset */
5680         if (!hdev->fd_en)
5681                 return 0;
5682
5683         spin_lock_bh(&hdev->fd_rule_lock);
5684         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5685                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5686                 if (!ret)
5687                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5688
5689                 if (ret) {
5690                         dev_warn(&hdev->pdev->dev,
5691                                  "Restore rule %d failed, remove it\n",
5692                                  rule->location);
5693                         clear_bit(rule->location, hdev->fd_bmap);
5694                         hlist_del(&rule->rule_node);
5695                         kfree(rule);
5696                         hdev->hclge_fd_rule_num--;
5697                 }
5698         }
5699
5700         if (hdev->hclge_fd_rule_num)
5701                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5702
5703         spin_unlock_bh(&hdev->fd_rule_lock);
5704
5705         return 0;
5706 }
5707
5708 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5709                                  struct ethtool_rxnfc *cmd)
5710 {
5711         struct hclge_vport *vport = hclge_get_vport(handle);
5712         struct hclge_dev *hdev = vport->back;
5713
5714         if (!hnae3_dev_fd_supported(hdev))
5715                 return -EOPNOTSUPP;
5716
5717         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5718         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5719
5720         return 0;
5721 }
5722
5723 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5724                                   struct ethtool_rxnfc *cmd)
5725 {
5726         struct hclge_vport *vport = hclge_get_vport(handle);
5727         struct hclge_fd_rule *rule = NULL;
5728         struct hclge_dev *hdev = vport->back;
5729         struct ethtool_rx_flow_spec *fs;
5730         struct hlist_node *node2;
5731
5732         if (!hnae3_dev_fd_supported(hdev))
5733                 return -EOPNOTSUPP;
5734
5735         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5736
5737         spin_lock_bh(&hdev->fd_rule_lock);
5738
5739         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5740                 if (rule->location >= fs->location)
5741                         break;
5742         }
5743
5744         if (!rule || fs->location != rule->location) {
5745                 spin_unlock_bh(&hdev->fd_rule_lock);
5746
5747                 return -ENOENT;
5748         }
5749
5750         fs->flow_type = rule->flow_type;
5751         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5752         case SCTP_V4_FLOW:
5753         case TCP_V4_FLOW:
5754         case UDP_V4_FLOW:
5755                 fs->h_u.tcp_ip4_spec.ip4src =
5756                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5757                 fs->m_u.tcp_ip4_spec.ip4src =
5758                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5759                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5760
5761                 fs->h_u.tcp_ip4_spec.ip4dst =
5762                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5763                 fs->m_u.tcp_ip4_spec.ip4dst =
5764                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5765                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5766
5767                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5768                 fs->m_u.tcp_ip4_spec.psrc =
5769                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5770                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5771
5772                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5773                 fs->m_u.tcp_ip4_spec.pdst =
5774                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5775                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5776
5777                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5778                 fs->m_u.tcp_ip4_spec.tos =
5779                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5780                                 0 : rule->tuples_mask.ip_tos;
5781
5782                 break;
5783         case IP_USER_FLOW:
5784                 fs->h_u.usr_ip4_spec.ip4src =
5785                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5786                 fs->m_u.usr_ip4_spec.ip4src =
5787                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5788                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5789
5790                 fs->h_u.usr_ip4_spec.ip4dst =
5791                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5792                 fs->m_u.usr_ip4_spec.ip4dst =
5793                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5794                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5795
5796                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5797                 fs->m_u.usr_ip4_spec.tos =
5798                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5799                                 0 : rule->tuples_mask.ip_tos;
5800
5801                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5802                 fs->m_u.usr_ip4_spec.proto =
5803                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5804                                 0 : rule->tuples_mask.ip_proto;
5805
5806                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5807
5808                 break;
5809         case SCTP_V6_FLOW:
5810         case TCP_V6_FLOW:
5811         case UDP_V6_FLOW:
5812                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5813                                   rule->tuples.src_ip, IPV6_SIZE);
5814                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5815                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5816                                sizeof(int) * IPV6_SIZE);
5817                 else
5818                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5819                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5820
5821                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5822                                   rule->tuples.dst_ip, IPV6_SIZE);
5823                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5824                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5825                                sizeof(int) * IPV6_SIZE);
5826                 else
5827                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5828                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5829
5830                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5831                 fs->m_u.tcp_ip6_spec.psrc =
5832                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5833                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5834
5835                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5836                 fs->m_u.tcp_ip6_spec.pdst =
5837                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5838                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5839
5840                 break;
5841         case IPV6_USER_FLOW:
5842                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5843                                   rule->tuples.src_ip, IPV6_SIZE);
5844                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5845                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5846                                sizeof(int) * IPV6_SIZE);
5847                 else
5848                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5849                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5850
5851                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5852                                   rule->tuples.dst_ip, IPV6_SIZE);
5853                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5854                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5855                                sizeof(int) * IPV6_SIZE);
5856                 else
5857                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5858                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5859
5860                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5861                 fs->m_u.usr_ip6_spec.l4_proto =
5862                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5863                                 0 : rule->tuples_mask.ip_proto;
5864
5865                 break;
5866         case ETHER_FLOW:
5867                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5868                                 rule->tuples.src_mac);
5869                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5870                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5871                 else
5872                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5873                                         rule->tuples_mask.src_mac);
5874
5875                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5876                                 rule->tuples.dst_mac);
5877                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5878                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5879                 else
5880                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5881                                         rule->tuples_mask.dst_mac);
5882
5883                 fs->h_u.ether_spec.h_proto =
5884                                 cpu_to_be16(rule->tuples.ether_proto);
5885                 fs->m_u.ether_spec.h_proto =
5886                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5887                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5888
5889                 break;
5890         default:
5891                 spin_unlock_bh(&hdev->fd_rule_lock);
5892                 return -EOPNOTSUPP;
5893         }
5894
5895         if (fs->flow_type & FLOW_EXT) {
5896                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5897                 fs->m_ext.vlan_tci =
5898                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5899                                 cpu_to_be16(VLAN_VID_MASK) :
5900                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5901         }
5902
5903         if (fs->flow_type & FLOW_MAC_EXT) {
5904                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5905                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5906                         eth_zero_addr(fs->m_ext.h_dest);
5907                 else
5908                         ether_addr_copy(fs->m_ext.h_dest,
5909                                         rule->tuples_mask.dst_mac);
5910         }
5911
5912         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5913                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5914         } else {
5915                 u64 vf_id;
5916
5917                 fs->ring_cookie = rule->queue_id;
5918                 vf_id = rule->vf_id;
5919                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5920                 fs->ring_cookie |= vf_id;
5921         }
5922
5923         spin_unlock_bh(&hdev->fd_rule_lock);
5924
5925         return 0;
5926 }
5927
5928 static int hclge_get_all_rules(struct hnae3_handle *handle,
5929                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5930 {
5931         struct hclge_vport *vport = hclge_get_vport(handle);
5932         struct hclge_dev *hdev = vport->back;
5933         struct hclge_fd_rule *rule;
5934         struct hlist_node *node2;
5935         int cnt = 0;
5936
5937         if (!hnae3_dev_fd_supported(hdev))
5938                 return -EOPNOTSUPP;
5939
5940         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5941
5942         spin_lock_bh(&hdev->fd_rule_lock);
5943         hlist_for_each_entry_safe(rule, node2,
5944                                   &hdev->fd_rule_list, rule_node) {
5945                 if (cnt == cmd->rule_cnt) {
5946                         spin_unlock_bh(&hdev->fd_rule_lock);
5947                         return -EMSGSIZE;
5948                 }
5949
5950                 rule_locs[cnt] = rule->location;
5951                 cnt++;
5952         }
5953
5954         spin_unlock_bh(&hdev->fd_rule_lock);
5955
5956         cmd->rule_cnt = cnt;
5957
5958         return 0;
5959 }
5960
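/* Convert the flow_keys dissected from a received packet into hclge rule
 * tuples. src_ip/dst_ip are arrays of four u32 in host order; an IPv4
 * address occupies only the last element (IPV4_INDEX), matching how
 * ethtool-configured IPv4 rules are stored above.
 */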
5961 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5962                                      struct hclge_fd_rule_tuples *tuples)
5963 {
5964         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5965         tuples->ip_proto = fkeys->basic.ip_proto;
5966         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5967
5968         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5969                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5970                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5971         } else {
5972                 memcpy(tuples->src_ip,
5973                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5974                        sizeof(tuples->src_ip));
5975                 memcpy(tuples->dst_ip,
5976                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5977                        sizeof(tuples->dst_ip));
5978         }
5979 }
5980
5981 /* traverse all rules, check whether an existing rule has the same tuples */
5982 static struct hclge_fd_rule *
5983 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5984                           const struct hclge_fd_rule_tuples *tuples)
5985 {
5986         struct hclge_fd_rule *rule = NULL;
5987         struct hlist_node *node;
5988
5989         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5990                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5991                         return rule;
5992         }
5993
5994         return NULL;
5995 }
5996
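/* Build an aRFS rule from the dissected tuples. The tuple mask is set to
 * all-ones (exact match) and unused_tuple then marks the fields the
 * hardware key ignores: MAC addresses, VLAN tag, IP TOS and the source
 * port. The effective key is therefore ip_proto, src/dst IP and the
 * destination port.
 */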
5997 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5998                                      struct hclge_fd_rule *rule)
5999 {
6000         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6001                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6002                              BIT(INNER_SRC_PORT);
6003         rule->action = 0;
6004         rule->vf_id = 0;
6005         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6006         if (tuples->ether_proto == ETH_P_IP) {
6007                 if (tuples->ip_proto == IPPROTO_TCP)
6008                         rule->flow_type = TCP_V4_FLOW;
6009                 else
6010                         rule->flow_type = UDP_V4_FLOW;
6011         } else {
6012                 if (tuples->ip_proto == IPPROTO_TCP)
6013                         rule->flow_type = TCP_V6_FLOW;
6014                 else
6015                         rule->flow_type = UDP_V6_FLOW;
6016         }
6017         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6018         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6019 }
6020
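/* Entry point for accelerated RFS, reached from the driver's
 * ndo_rx_flow_steer path. On success it returns the rule location, which
 * the core later passes back when asking whether the flow may expire.
 */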
6021 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6022                                       u16 flow_id, struct flow_keys *fkeys)
6023 {
6024         struct hclge_vport *vport = hclge_get_vport(handle);
6025         struct hclge_fd_rule_tuples new_tuples;
6026         struct hclge_dev *hdev = vport->back;
6027         struct hclge_fd_rule *rule;
6028         u16 tmp_queue_id;
6029         u16 bit_id;
6030         int ret;
6031
6032         if (!hnae3_dev_fd_supported(hdev))
6033                 return -EOPNOTSUPP;
6034
6035         memset(&new_tuples, 0, sizeof(new_tuples));
6036         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6037
6038         spin_lock_bh(&hdev->fd_rule_lock);
6039
6040         /* when an fd rule added by the user already exists, arfs
6041          * must not take effect
6042          */
6043         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6044                 spin_unlock_bh(&hdev->fd_rule_lock);
6045
6046                 return -EOPNOTSUPP;
6047         }
6048
6049         /* check whether a flow director filter already exists for this flow:
6050          * if not, create a new filter for it;
6051          * if a filter exists with a different queue id, modify the filter;
6052          * if a filter exists with the same queue id, do nothing
6053          */
6054         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6055         if (!rule) {
6056                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6057                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6058                         spin_unlock_bh(&hdev->fd_rule_lock);
6059
6060                         return -ENOSPC;
6061                 }
6062
6063                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6064                 if (!rule) {
6065                         spin_unlock_bh(&hdev->fd_rule_lock);
6066
6067                         return -ENOMEM;
6068                 }
6069
6070                 set_bit(bit_id, hdev->fd_bmap);
6071                 rule->location = bit_id;
6072                 rule->flow_id = flow_id;
6073                 rule->queue_id = queue_id;
6074                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6075                 ret = hclge_fd_config_rule(hdev, rule);
6076
6077                 spin_unlock_bh(&hdev->fd_rule_lock);
6078
6079                 if (ret)
6080                         return ret;
6081
6082                 return rule->location;
6083         }
6084
6085         spin_unlock_bh(&hdev->fd_rule_lock);
6086
6087         if (rule->queue_id == queue_id)
6088                 return rule->location;
6089
6090         tmp_queue_id = rule->queue_id;
6091         rule->queue_id = queue_id;
6092         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6093         if (ret) {
6094                 rule->queue_id = tmp_queue_id;
6095                 return ret;
6096         }
6097
6098         return rule->location;
6099 }
6100
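/* Age out aRFS rules: expired rules are first moved onto a local del_list
 * under fd_rule_lock, and their TCAM entries are cleared only after the
 * lock is dropped, keeping the BH-disabled critical section short.
 */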
6101 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6102 {
6103 #ifdef CONFIG_RFS_ACCEL
6104         struct hnae3_handle *handle = &hdev->vport[0].nic;
6105         struct hclge_fd_rule *rule;
6106         struct hlist_node *node;
6107         HLIST_HEAD(del_list);
6108
6109         spin_lock_bh(&hdev->fd_rule_lock);
6110         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6111                 spin_unlock_bh(&hdev->fd_rule_lock);
6112                 return;
6113         }
6114         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6115                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6116                                         rule->flow_id, rule->location)) {
6117                         hlist_del_init(&rule->rule_node);
6118                         hlist_add_head(&rule->rule_node, &del_list);
6119                         hdev->hclge_fd_rule_num--;
6120                         clear_bit(rule->location, hdev->fd_bmap);
6121                 }
6122         }
6123         spin_unlock_bh(&hdev->fd_rule_lock);
6124
6125         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6126                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6127                                      rule->location, NULL, false);
6128                 kfree(rule);
6129         }
6130 #endif
6131 }
6132
6133 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6134 {
6135 #ifdef CONFIG_RFS_ACCEL
6136         struct hclge_vport *vport = hclge_get_vport(handle);
6137         struct hclge_dev *hdev = vport->back;
6138
6139         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6140                 hclge_del_all_fd_entries(handle, true);
6141 #endif
6142 }
6143
6144 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6145 {
6146         struct hclge_vport *vport = hclge_get_vport(handle);
6147         struct hclge_dev *hdev = vport->back;
6148
6149         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6150                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6151 }
6152
6153 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6154 {
6155         struct hclge_vport *vport = hclge_get_vport(handle);
6156         struct hclge_dev *hdev = vport->back;
6157
6158         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6159 }
6160
6161 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6162 {
6163         struct hclge_vport *vport = hclge_get_vport(handle);
6164         struct hclge_dev *hdev = vport->back;
6165
6166         return hdev->rst_stats.hw_reset_done_cnt;
6167 }
6168
6169 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6170 {
6171         struct hclge_vport *vport = hclge_get_vport(handle);
6172         struct hclge_dev *hdev = vport->back;
6173         bool clear;
6174
6175         hdev->fd_en = enable;
6176         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6177         if (!enable)
6178                 hclge_del_all_fd_entries(handle, clear);
6179         else
6180                 hclge_restore_fd_entries(handle);
6181 }
6182
6183 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6184 {
6185         struct hclge_desc desc;
6186         struct hclge_config_mac_mode_cmd *req =
6187                 (struct hclge_config_mac_mode_cmd *)desc.data;
6188         u32 loop_en = 0;
6189         int ret;
6190
6191         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6192
6193         if (enable) {
6194                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6195                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6196                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6197                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6198                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6199                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6200                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6201                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6202                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6203                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6204         }
6205
6206         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6207
6208         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6209         if (ret)
6210                 dev_err(&hdev->pdev->dev,
6211                         "mac enable fail, ret =%d.\n", ret);
6212 }
6213
6214 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6215                                      u8 switch_param, u8 param_mask)
6216 {
6217         struct hclge_mac_vlan_switch_cmd *req;
6218         struct hclge_desc desc;
6219         u32 func_id;
6220         int ret;
6221
6222         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6223         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6224         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6225                                    false);
6226         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6227         req->func_id = cpu_to_le32(func_id);
6228         req->switch_param = switch_param;
6229         req->param_mask = param_mask;
6230
6231         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6232         if (ret)
6233                 dev_err(&hdev->pdev->dev,
6234                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6235         return ret;
6236 }
6237
6238 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6239                                        int link_ret)
6240 {
6241 #define HCLGE_PHY_LINK_STATUS_NUM  200
6242
6243         struct phy_device *phydev = hdev->hw.mac.phydev;
6244         int i = 0;
6245         int ret;
6246
6247         do {
6248                 ret = phy_read_status(phydev);
6249                 if (ret) {
6250                         dev_err(&hdev->pdev->dev,
6251                                 "phy update link status fail, ret = %d\n", ret);
6252                         return;
6253                 }
6254
6255                 if (phydev->link == link_ret)
6256                         break;
6257
6258                 msleep(HCLGE_LINK_STATUS_MS);
6259         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6260 }
6261
6262 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6263 {
6264 #define HCLGE_MAC_LINK_STATUS_NUM  100
6265
6266         int i = 0;
6267         int ret;
6268
6269         do {
6270                 ret = hclge_get_mac_link_status(hdev);
6271                 if (ret < 0)
6272                         return ret;
6273                 else if (ret == link_ret)
6274                         return 0;
6275
6276                 msleep(HCLGE_LINK_STATUS_MS);
6277         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6278         return -EBUSY;
6279 }
6280
6281 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6282                                           bool is_phy)
6283 {
6284 #define HCLGE_LINK_STATUS_DOWN 0
6285 #define HCLGE_LINK_STATUS_UP   1
6286
6287         int link_ret;
6288
6289         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6290
6291         if (is_phy)
6292                 hclge_phy_link_status_wait(hdev, link_ret);
6293
6294         return hclge_mac_link_status_wait(hdev, link_ret);
6295 }
6296
6297 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6298 {
6299         struct hclge_config_mac_mode_cmd *req;
6300         struct hclge_desc desc;
6301         u32 loop_en;
6302         int ret;
6303
6304         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6305         /* 1 Read out the MAC mode config at first */
6306         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6307         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6308         if (ret) {
6309                 dev_err(&hdev->pdev->dev,
6310                         "mac loopback get fail, ret =%d.\n", ret);
6311                 return ret;
6312         }
6313
6314         /* 2 Then setup the loopback flag */
6315         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6316         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6317         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6318         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6319
6320         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6321
6322         /* 3 Config mac work mode with loopback flag
6323          * and its original configure parameters
6324          */
6325         hclge_cmd_reuse_desc(&desc, false);
6326         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6327         if (ret)
6328                 dev_err(&hdev->pdev->dev,
6329                         "mac loopback set fail, ret =%d.\n", ret);
6330         return ret;
6331 }
6332
6333 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6334                                      enum hnae3_loop loop_mode)
6335 {
6336 #define HCLGE_SERDES_RETRY_MS   10
6337 #define HCLGE_SERDES_RETRY_NUM  100
6338
6339         struct hclge_serdes_lb_cmd *req;
6340         struct hclge_desc desc;
6341         int ret, i = 0;
6342         u8 loop_mode_b;
6343
6344         req = (struct hclge_serdes_lb_cmd *)desc.data;
6345         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6346
6347         switch (loop_mode) {
6348         case HNAE3_LOOP_SERIAL_SERDES:
6349                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6350                 break;
6351         case HNAE3_LOOP_PARALLEL_SERDES:
6352                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6353                 break;
6354         default:
6355                 dev_err(&hdev->pdev->dev,
6356                         "unsupported serdes loopback mode %d\n", loop_mode);
6357                 return -ENOTSUPP;
6358         }
6359
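        /* req->mask selects the loop-mode bit to update, while req->enable
         * holds its new value; the descriptor data was zeroed by
         * hclge_cmd_setup_basic_desc(), so leaving enable clear disables
         * the mode.
         */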
6360         if (en) {
6361                 req->enable = loop_mode_b;
6362                 req->mask = loop_mode_b;
6363         } else {
6364                 req->mask = loop_mode_b;
6365         }
6366
6367         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6368         if (ret) {
6369                 dev_err(&hdev->pdev->dev,
6370                         "serdes loopback set fail, ret = %d\n", ret);
6371                 return ret;
6372         }
6373
6374         do {
6375                 msleep(HCLGE_SERDES_RETRY_MS);
6376                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6377                                            true);
6378                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6379                 if (ret) {
6380                         dev_err(&hdev->pdev->dev,
6381                                 "serdes loopback get, ret = %d\n", ret);
6382                         return ret;
6383                 }
6384         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6385                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6386
6387         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6388                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6389                 return -EBUSY;
6390         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6391                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6392                 return -EIO;
6393         }
6394
6395         hclge_cfg_mac_mode(hdev, en);
6396
6397         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6398         if (ret)
6399                 dev_err(&hdev->pdev->dev,
6400                         "serdes loopback config mac mode timeout\n");
6401
6402         return ret;
6403 }
6404
6405 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6406                                      struct phy_device *phydev)
6407 {
6408         int ret;
6409
6410         if (!phydev->suspended) {
6411                 ret = phy_suspend(phydev);
6412                 if (ret)
6413                         return ret;
6414         }
6415
6416         ret = phy_resume(phydev);
6417         if (ret)
6418                 return ret;
6419
6420         return phy_loopback(phydev, true);
6421 }
6422
6423 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6424                                       struct phy_device *phydev)
6425 {
6426         int ret;
6427
6428         ret = phy_loopback(phydev, false);
6429         if (ret)
6430                 return ret;
6431
6432         return phy_suspend(phydev);
6433 }
6434
6435 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6436 {
6437         struct phy_device *phydev = hdev->hw.mac.phydev;
6438         int ret;
6439
6440         if (!phydev)
6441                 return -ENOTSUPP;
6442
6443         if (en)
6444                 ret = hclge_enable_phy_loopback(hdev, phydev);
6445         else
6446                 ret = hclge_disable_phy_loopback(hdev, phydev);
6447         if (ret) {
6448                 dev_err(&hdev->pdev->dev,
6449                         "set phy loopback fail, ret = %d\n", ret);
6450                 return ret;
6451         }
6452
6453         hclge_cfg_mac_mode(hdev, en);
6454
6455         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6456         if (ret)
6457                 dev_err(&hdev->pdev->dev,
6458                         "phy loopback config mac mode timeout\n");
6459
6460         return ret;
6461 }
6462
6463 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6464                             int stream_id, bool enable)
6465 {
6466         struct hclge_desc desc;
6467         struct hclge_cfg_com_tqp_queue_cmd *req =
6468                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6469         int ret;
6470
6471         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6472         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6473         req->stream_id = cpu_to_le16(stream_id);
6474         if (enable)
6475                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6476
6477         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6478         if (ret)
6479                 dev_err(&hdev->pdev->dev,
6480                         "Tqp enable fail, status =%d.\n", ret);
6481         return ret;
6482 }
6483
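/* Configure one of the supported loopback modes (app/MAC, serdes or PHY)
 * and enable the TQPs to match. This is the backend used by the ethtool
 * self-test path (e.g. "ethtool -t"), which loops packets back at the
 * selected layer.
 */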
6484 static int hclge_set_loopback(struct hnae3_handle *handle,
6485                               enum hnae3_loop loop_mode, bool en)
6486 {
6487         struct hclge_vport *vport = hclge_get_vport(handle);
6488         struct hnae3_knic_private_info *kinfo;
6489         struct hclge_dev *hdev = vport->back;
6490         int i, ret;
6491
6492         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6493          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6494          * the same, the packets are looped back in the SSU. If SSU loopback
6495          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6496          */
6497         if (hdev->pdev->revision >= 0x21) {
6498                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6499
6500                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6501                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6502                 if (ret)
6503                         return ret;
6504         }
6505
6506         switch (loop_mode) {
6507         case HNAE3_LOOP_APP:
6508                 ret = hclge_set_app_loopback(hdev, en);
6509                 break;
6510         case HNAE3_LOOP_SERIAL_SERDES:
6511         case HNAE3_LOOP_PARALLEL_SERDES:
6512                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6513                 break;
6514         case HNAE3_LOOP_PHY:
6515                 ret = hclge_set_phy_loopback(hdev, en);
6516                 break;
6517         default:
6518                 ret = -ENOTSUPP;
6519                 dev_err(&hdev->pdev->dev,
6520                         "loop_mode %d is not supported\n", loop_mode);
6521                 break;
6522         }
6523
6524         if (ret)
6525                 return ret;
6526
6527         kinfo = &vport->nic.kinfo;
6528         for (i = 0; i < kinfo->num_tqps; i++) {
6529                 ret = hclge_tqp_enable(hdev, i, 0, en);
6530                 if (ret)
6531                         return ret;
6532         }
6533
6534         return 0;
6535 }
6536
6537 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6538 {
6539         struct hclge_vport *vport = hclge_get_vport(handle);
6540         struct hnae3_knic_private_info *kinfo;
6541         struct hnae3_queue *queue;
6542         struct hclge_tqp *tqp;
6543         int i;
6544
6545         kinfo = &vport->nic.kinfo;
6546         for (i = 0; i < kinfo->num_tqps; i++) {
6547                 queue = handle->kinfo.tqp[i];
6548                 tqp = container_of(queue, struct hclge_tqp, q);
6549                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6550         }
6551 }
6552
6553 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6554 {
6555         struct hclge_vport *vport = hclge_get_vport(handle);
6556         struct hclge_dev *hdev = vport->back;
6557
6558         if (enable) {
6559                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6560         } else {
6561                 /* Set the DOWN flag here to disable the service to be
6562                  * scheduled again
6563                  */
6564                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6565                 cancel_delayed_work_sync(&hdev->service_task);
6566                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6567         }
6568 }
6569
6570 static int hclge_ae_start(struct hnae3_handle *handle)
6571 {
6572         struct hclge_vport *vport = hclge_get_vport(handle);
6573         struct hclge_dev *hdev = vport->back;
6574
6575         /* mac enable */
6576         hclge_cfg_mac_mode(hdev, true);
6577         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6578         hdev->hw.mac.link = 0;
6579
6580         /* reset tqp stats */
6581         hclge_reset_tqp_stats(handle);
6582
6583         hclge_mac_start_phy(hdev);
6584
6585         return 0;
6586 }
6587
6588 static void hclge_ae_stop(struct hnae3_handle *handle)
6589 {
6590         struct hclge_vport *vport = hclge_get_vport(handle);
6591         struct hclge_dev *hdev = vport->back;
6592         int i;
6593
6594         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6595
6596         hclge_clear_arfs_rules(handle);
6597
6598         /* If it is not a PF reset, the firmware will disable the MAC,
6599          * so we only need to stop the PHY here.
6600          */
6601         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6602             hdev->reset_type != HNAE3_FUNC_RESET) {
6603                 hclge_mac_stop_phy(hdev);
6604                 hclge_update_link_status(hdev);
6605                 return;
6606         }
6607
6608         for (i = 0; i < handle->kinfo.num_tqps; i++)
6609                 hclge_reset_tqp(handle, i);
6610
6611         hclge_config_mac_tnl_int(hdev, false);
6612
6613         /* Mac disable */
6614         hclge_cfg_mac_mode(hdev, false);
6615
6616         hclge_mac_stop_phy(hdev);
6617
6618         /* reset tqp stats */
6619         hclge_reset_tqp_stats(handle);
6620         hclge_update_link_status(hdev);
6621 }
6622
6623 int hclge_vport_start(struct hclge_vport *vport)
6624 {
6625         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6626         vport->last_active_jiffies = jiffies;
6627         return 0;
6628 }
6629
6630 void hclge_vport_stop(struct hclge_vport *vport)
6631 {
6632         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6633 }
6634
6635 static int hclge_client_start(struct hnae3_handle *handle)
6636 {
6637         struct hclge_vport *vport = hclge_get_vport(handle);
6638
6639         return hclge_vport_start(vport);
6640 }
6641
6642 static void hclge_client_stop(struct hnae3_handle *handle)
6643 {
6644         struct hclge_vport *vport = hclge_get_vport(handle);
6645
6646         hclge_vport_stop(vport);
6647 }
6648
6649 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6650                                          u16 cmdq_resp, u8  resp_code,
6651                                          enum hclge_mac_vlan_tbl_opcode op)
6652 {
6653         struct hclge_dev *hdev = vport->back;
6654
6655         if (cmdq_resp) {
6656                 dev_err(&hdev->pdev->dev,
6657                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6658                         cmdq_resp);
6659                 return -EIO;
6660         }
6661
6662         if (op == HCLGE_MAC_VLAN_ADD) {
6663                 if (!resp_code || resp_code == 1) {
6664                         return 0;
6665                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6666                         dev_err(&hdev->pdev->dev,
6667                                 "add mac addr failed for uc_overflow.\n");
6668                         return -ENOSPC;
6669                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6670                         dev_err(&hdev->pdev->dev,
6671                                 "add mac addr failed for mc_overflow.\n");
6672                         return -ENOSPC;
6673                 }
6674
6675                 dev_err(&hdev->pdev->dev,
6676                         "add mac addr failed for undefined, code=%u.\n",
6677                         resp_code);
6678                 return -EIO;
6679         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6680                 if (!resp_code) {
6681                         return 0;
6682                 } else if (resp_code == 1) {
6683                         dev_dbg(&hdev->pdev->dev,
6684                                 "remove mac addr failed for miss.\n");
6685                         return -ENOENT;
6686                 }
6687
6688                 dev_err(&hdev->pdev->dev,
6689                         "remove mac addr failed for undefined, code=%u.\n",
6690                         resp_code);
6691                 return -EIO;
6692         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6693                 if (!resp_code) {
6694                         return 0;
6695                 } else if (resp_code == 1) {
6696                         dev_dbg(&hdev->pdev->dev,
6697                                 "lookup mac addr failed for miss.\n");
6698                         return -ENOENT;
6699                 }
6700
6701                 dev_err(&hdev->pdev->dev,
6702                         "lookup mac addr failed for undefined, code=%u.\n",
6703                         resp_code);
6704                 return -EIO;
6705         }
6706
6707         dev_err(&hdev->pdev->dev,
6708                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6709
6710         return -EINVAL;
6711 }
6712
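/* Set or clear the bit for @vfid in the function-id bitmap spread across
 * the extra descriptors: the first 192 function ids live in desc[1] (six
 * 32-bit words), the rest in desc[2]. For example, vfid 200 lands in
 * desc[2], word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
 */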
6713 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6714 {
6715 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6716
6717         unsigned int word_num;
6718         unsigned int bit_num;
6719
6720         if (vfid > 255 || vfid < 0)
6721                 return -EIO;
6722
6723         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6724                 word_num = vfid / 32;
6725                 bit_num  = vfid % 32;
6726                 if (clr)
6727                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6728                 else
6729                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6730         } else {
6731                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6732                 bit_num  = vfid % 32;
6733                 if (clr)
6734                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6735                 else
6736                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6737         }
6738
6739         return 0;
6740 }
6741
6742 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6743 {
6744 #define HCLGE_DESC_NUMBER 3
6745 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6746         int i, j;
6747
6748         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6749                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6750                         if (desc[i].data[j])
6751                                 return false;
6752
6753         return true;
6754 }
6755
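/* Pack a MAC address into the little-endian mac_vlan table entry layout:
 * bytes 0-3 of the address form the 32-bit high word and bytes 4-5 the
 * 16-bit low word. For example, 00:11:22:33:44:55 becomes
 * mac_addr_hi32 = 0x33221100 and mac_addr_lo16 = 0x5544.
 */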
6756 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6757                                    const u8 *addr, bool is_mc)
6758 {
6759         const unsigned char *mac_addr = addr;
6760         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
6761                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
6762         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6763
6764         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6765         if (is_mc) {
6766                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6767                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6768         }
6769
6770         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6771         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6772 }
6773
6774 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6775                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6776 {
6777         struct hclge_dev *hdev = vport->back;
6778         struct hclge_desc desc;
6779         u8 resp_code;
6780         u16 retval;
6781         int ret;
6782
6783         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6784
6785         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6786
6787         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6788         if (ret) {
6789                 dev_err(&hdev->pdev->dev,
6790                         "del mac addr failed for cmd_send, ret =%d.\n",
6791                         ret);
6792                 return ret;
6793         }
6794         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6795         retval = le16_to_cpu(desc.retval);
6796
6797         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6798                                              HCLGE_MAC_VLAN_REMOVE);
6799 }
6800
6801 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6802                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6803                                      struct hclge_desc *desc,
6804                                      bool is_mc)
6805 {
6806         struct hclge_dev *hdev = vport->back;
6807         u8 resp_code;
6808         u16 retval;
6809         int ret;
6810
6811         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6812         if (is_mc) {
6813                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6814                 memcpy(desc[0].data,
6815                        req,
6816                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6817                 hclge_cmd_setup_basic_desc(&desc[1],
6818                                            HCLGE_OPC_MAC_VLAN_ADD,
6819                                            true);
6820                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6821                 hclge_cmd_setup_basic_desc(&desc[2],
6822                                            HCLGE_OPC_MAC_VLAN_ADD,
6823                                            true);
6824                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6825         } else {
6826                 memcpy(desc[0].data,
6827                        req,
6828                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6829                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6830         }
6831         if (ret) {
6832                 dev_err(&hdev->pdev->dev,
6833                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6834                         ret);
6835                 return ret;
6836         }
6837         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6838         retval = le16_to_cpu(desc[0].retval);
6839
6840         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6841                                              HCLGE_MAC_VLAN_LKUP);
6842 }
6843
6844 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6845                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6846                                   struct hclge_desc *mc_desc)
6847 {
6848         struct hclge_dev *hdev = vport->back;
6849         int cfg_status;
6850         u8 resp_code;
6851         u16 retval;
6852         int ret;
6853
6854         if (!mc_desc) {
6855                 struct hclge_desc desc;
6856
6857                 hclge_cmd_setup_basic_desc(&desc,
6858                                            HCLGE_OPC_MAC_VLAN_ADD,
6859                                            false);
6860                 memcpy(desc.data, req,
6861                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6862                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6863                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6864                 retval = le16_to_cpu(desc.retval);
6865
6866                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6867                                                            resp_code,
6868                                                            HCLGE_MAC_VLAN_ADD);
6869         } else {
6870                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6871                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6872                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6873                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6874                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6875                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6876                 memcpy(mc_desc[0].data, req,
6877                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6878                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6879                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6880                 retval = le16_to_cpu(mc_desc[0].retval);
6881
6882                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6883                                                            resp_code,
6884                                                            HCLGE_MAC_VLAN_ADD);
6885         }
6886
6887         if (ret) {
6888                 dev_err(&hdev->pdev->dev,
6889                         "add mac addr failed for cmd_send, ret =%d.\n",
6890                         ret);
6891                 return ret;
6892         }
6893
6894         return cfg_status;
6895 }
6896
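/* Allocate unicast MAC-VLAN (UMV) table space from firmware and split it
 * into a private quota per function plus a shared pool. As a worked
 * example with illustrative numbers: with 512 entries granted and 8
 * requested VFs, each of the 10 functions gets 512 / 10 = 51 private
 * entries, and the shared pool holds 51 + 512 % 10 = 53 entries.
 */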
6897 static int hclge_init_umv_space(struct hclge_dev *hdev)
6898 {
6899         u16 allocated_size = 0;
6900         int ret;
6901
6902         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6903                                   true);
6904         if (ret)
6905                 return ret;
6906
6907         if (allocated_size < hdev->wanted_umv_size)
6908                 dev_warn(&hdev->pdev->dev,
6909                          "Alloc umv space failed, want %d, get %d\n",
6910                          hdev->wanted_umv_size, allocated_size);
6911
6912         mutex_init(&hdev->umv_mutex);
6913         hdev->max_umv_size = allocated_size;
6914         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6915          * reserve some unicast mac vlan table entries to be shared by the
6916          * pf and its vfs.
6917          */
6918         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6919         hdev->share_umv_size = hdev->priv_umv_size +
6920                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6921
6922         return 0;
6923 }
6924
6925 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6926 {
6927         int ret;
6928
6929         if (hdev->max_umv_size > 0) {
6930                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6931                                           false);
6932                 if (ret)
6933                         return ret;
6934                 hdev->max_umv_size = 0;
6935         }
6936         mutex_destroy(&hdev->umv_mutex);
6937
6938         return 0;
6939 }
6940
6941 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6942                                u16 *allocated_size, bool is_alloc)
6943 {
6944         struct hclge_umv_spc_alc_cmd *req;
6945         struct hclge_desc desc;
6946         int ret;
6947
6948         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6949         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6950         if (!is_alloc)
6951                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6952
6953         req->space_size = cpu_to_le32(space_size);
6954
6955         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6956         if (ret) {
6957                 dev_err(&hdev->pdev->dev,
6958                         "%s umv space failed for cmd_send, ret =%d\n",
6959                         is_alloc ? "allocate" : "free", ret);
6960                 return ret;
6961         }
6962
6963         if (is_alloc && allocated_size)
6964                 *allocated_size = le32_to_cpu(desc.data[1]);
6965
6966         return 0;
6967 }
6968
6969 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6970 {
6971         struct hclge_vport *vport;
6972         int i;
6973
6974         for (i = 0; i < hdev->num_alloc_vport; i++) {
6975                 vport = &hdev->vport[i];
6976                 vport->used_umv_num = 0;
6977         }
6978
6979         mutex_lock(&hdev->umv_mutex);
6980         hdev->share_umv_size = hdev->priv_umv_size +
6981                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6982         mutex_unlock(&hdev->umv_mutex);
6983 }
6984
6985 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6986 {
6987         struct hclge_dev *hdev = vport->back;
6988         bool is_full;
6989
6990         mutex_lock(&hdev->umv_mutex);
6991         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6992                    hdev->share_umv_size == 0);
6993         mutex_unlock(&hdev->umv_mutex);
6994
6995         return is_full;
6996 }
6997
6998 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6999 {
7000         struct hclge_dev *hdev = vport->back;
7001
7002         mutex_lock(&hdev->umv_mutex);
7003         if (is_free) {
7004                 if (vport->used_umv_num > hdev->priv_umv_size)
7005                         hdev->share_umv_size++;
7006
7007                 if (vport->used_umv_num > 0)
7008                         vport->used_umv_num--;
7009         } else {
7010                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7011                     hdev->share_umv_size > 0)
7012                         hdev->share_umv_size--;
7013                 vport->used_umv_num++;
7014         }
7015         mutex_unlock(&hdev->umv_mutex);
7016 }
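
     /* Accounting sketch (derived from hclge_update_umv_space() above): with
      * priv_umv_size = 32, a vport's 33rd address is charged to the shared
      * pool (share_umv_size--); freeing an address while the vport is over
      * its private quota returns the credit (share_umv_size++) before
      * used_umv_num is decremented.
      */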
7017
7018 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7019                              const unsigned char *addr)
7020 {
7021         struct hclge_vport *vport = hclge_get_vport(handle);
7022
7023         return hclge_add_uc_addr_common(vport, addr);
7024 }
7025
7026 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7027                              const unsigned char *addr)
7028 {
7029         struct hclge_dev *hdev = vport->back;
7030         struct hclge_mac_vlan_tbl_entry_cmd req;
7031         struct hclge_desc desc;
7032         u16 egress_port = 0;
7033         int ret;
7034
7035         /* mac addr check */
7036         if (is_zero_ether_addr(addr) ||
7037             is_broadcast_ether_addr(addr) ||
7038             is_multicast_ether_addr(addr)) {
7039                 dev_err(&hdev->pdev->dev,
7040                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7041                          addr, is_zero_ether_addr(addr),
7042                          is_broadcast_ether_addr(addr),
7043                          is_multicast_ether_addr(addr));
7044                 return -EINVAL;
7045         }
7046
7047         memset(&req, 0, sizeof(req));
7048
7049         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7050                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7051
7052         req.egress_port = cpu_to_le16(egress_port);
7053
7054         hclge_prepare_mac_addr(&req, addr, false);
7055
7056         /* Look up the mac address in the mac_vlan table, and add
7057          * it if the entry does not exist. Duplicate unicast entries
7058          * are not allowed in the mac vlan table.
7059          */
7060         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7061         if (ret == -ENOENT) {
7062                 if (!hclge_is_umv_space_full(vport)) {
7063                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7064                         if (!ret)
7065                                 hclge_update_umv_space(vport, false);
7066                         return ret;
7067                 }
7068
7069                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7070                         hdev->priv_umv_size);
7071
7072                 return -ENOSPC;
7073         }
7074
7075         /* check if we just hit the duplicate */
7076         if (!ret) {
7077                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
7078                          vport->vport_id, addr);
7079                 return 0;
7080         }
7081
7082         dev_err(&hdev->pdev->dev,
7083                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7084                 addr);
7085
7086         return ret;
7087 }
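
     /* A minimal caller sketch (hypothetical address, same error contract as
      * hclge_add_uc_addr_common() above): the function returns 0 on success
      * or on a duplicate entry, -EINVAL for a zero/broadcast/multicast
      * address, and -ENOSPC once both the private and shared UMV pools are
      * exhausted.
      *
      *      static const u8 demo_mac[ETH_ALEN] = {
      *              0x02, 0x11, 0x22, 0x33, 0x44, 0x55
      *      };
      *      int err = hclge_add_uc_addr_common(vport, demo_mac);
      *
      *      if (err == -ENOSPC)
      *              handle_umv_exhaustion(vport);   (hypothetical helper)
      */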
7088
7089 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7090                             const unsigned char *addr)
7091 {
7092         struct hclge_vport *vport = hclge_get_vport(handle);
7093
7094         return hclge_rm_uc_addr_common(vport, addr);
7095 }
7096
7097 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7098                             const unsigned char *addr)
7099 {
7100         struct hclge_dev *hdev = vport->back;
7101         struct hclge_mac_vlan_tbl_entry_cmd req;
7102         int ret;
7103
7104         /* mac addr check */
7105         if (is_zero_ether_addr(addr) ||
7106             is_broadcast_ether_addr(addr) ||
7107             is_multicast_ether_addr(addr)) {
7108                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7109                         addr);
7110                 return -EINVAL;
7111         }
7112
7113         memset(&req, 0, sizeof(req));
7114         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7115         hclge_prepare_mac_addr(&req, addr, false);
7116         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7117         if (!ret)
7118                 hclge_update_umv_space(vport, true);
7119
7120         return ret;
7121 }
7122
7123 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7124                              const unsigned char *addr)
7125 {
7126         struct hclge_vport *vport = hclge_get_vport(handle);
7127
7128         return hclge_add_mc_addr_common(vport, addr);
7129 }
7130
7131 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7132                              const unsigned char *addr)
7133 {
7134         struct hclge_dev *hdev = vport->back;
7135         struct hclge_mac_vlan_tbl_entry_cmd req;
7136         struct hclge_desc desc[3];
7137         int status;
7138
7139         /* mac addr check */
7140         if (!is_multicast_ether_addr(addr)) {
7141                 dev_err(&hdev->pdev->dev,
7142                         "Add mc mac err! invalid mac:%pM.\n",
7143                          addr);
7144                 return -EINVAL;
7145         }
7146         memset(&req, 0, sizeof(req));
7147         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7148         hclge_prepare_mac_addr(&req, addr, true);
7149         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7150         if (status) {
7151                 /* This mac addr does not exist, add a new entry for it */
7152                 memset(desc[0].data, 0, sizeof(desc[0].data));
7153                 memset(desc[1].data, 0, sizeof(desc[0].data));
7154                 memset(desc[2].data, 0, sizeof(desc[0].data));
7155         }
7156         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7157         if (status)
7158                 return status;
7159         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7160
7161         if (status == -ENOSPC)
7162                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7163
7164         return status;
7165 }
7166
7167 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7168                             const unsigned char *addr)
7169 {
7170         struct hclge_vport *vport = hclge_get_vport(handle);
7171
7172         return hclge_rm_mc_addr_common(vport, addr);
7173 }
7174
7175 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7176                             const unsigned char *addr)
7177 {
7178         struct hclge_dev *hdev = vport->back;
7179         struct hclge_mac_vlan_tbl_entry_cmd req;
7180         enum hclge_cmd_status status;
7181         struct hclge_desc desc[3];
7182
7183         /* mac addr check */
7184         if (!is_multicast_ether_addr(addr)) {
7185                 dev_dbg(&hdev->pdev->dev,
7186                         "Remove mc mac err! invalid mac:%pM.\n",
7187                          addr);
7188                 return -EINVAL;
7189         }
7190
7191         memset(&req, 0, sizeof(req));
7192         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7193         hclge_prepare_mac_addr(&req, addr, true);
7194         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7195         if (!status) {
7196                 /* This mac addr exists, remove this handle's VFID from it */
7197                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7198                 if (status)
7199                         return status;
7200
7201                 if (hclge_is_all_function_id_zero(desc))
7202                         /* All the vfids are zero, so we need to delete this entry */
7203                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7204                 else
7205                         /* Not all the vfids are zero, so update the entry */
7206                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7207
7208         } else {
7209                 /* This mac address may be in the mta table, but it cannot
7210                  * be deleted here because an mta entry represents an address
7211                  * range rather than a specific address. The delete action on
7212                  * all entries will take effect in update_mta_status, called
7213                  * by hns3_nic_set_rx_mode.
7214                  */
7215                 status = 0;
7216         }
7217
7218         return status;
7219 }
7220
7221 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7222                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7223 {
7224         struct hclge_vport_mac_addr_cfg *mac_cfg;
7225         struct list_head *list;
7226
7227         if (!vport->vport_id)
7228                 return;
7229
7230         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7231         if (!mac_cfg)
7232                 return;
7233
7234         mac_cfg->hd_tbl_status = true;
7235         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7236
7237         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7238                &vport->uc_mac_list : &vport->mc_mac_list;
7239
7240         list_add_tail(&mac_cfg->node, list);
7241 }
7242
7243 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7244                               bool is_write_tbl,
7245                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7246 {
7247         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7248         struct list_head *list;
7249         bool uc_flag, mc_flag;
7250
7251         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7252                &vport->uc_mac_list : &vport->mc_mac_list;
7253
7254         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7255         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7256
7257         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7258                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7259                         if (uc_flag && mac_cfg->hd_tbl_status)
7260                                 hclge_rm_uc_addr_common(vport, mac_addr);
7261
7262                         if (mc_flag && mac_cfg->hd_tbl_status)
7263                                 hclge_rm_mc_addr_common(vport, mac_addr);
7264
7265                         list_del(&mac_cfg->node);
7266                         kfree(mac_cfg);
7267                         break;
7268                 }
7269         }
7270 }
7271
7272 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7273                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7274 {
7275         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7276         struct list_head *list;
7277
7278         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7279                &vport->uc_mac_list : &vport->mc_mac_list;
7280
7281         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7282                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7283                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7284
7285                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7286                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7287
7288                 mac_cfg->hd_tbl_status = false;
7289                 if (is_del_list) {
7290                         list_del(&mac_cfg->node);
7291                         kfree(mac_cfg);
7292                 }
7293         }
7294 }
7295
7296 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7297 {
7298         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7299         struct hclge_vport *vport;
7300         int i;
7301
7302         mutex_lock(&hdev->vport_cfg_mutex);
7303         for (i = 0; i < hdev->num_alloc_vport; i++) {
7304                 vport = &hdev->vport[i];
7305                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7306                         list_del(&mac->node);
7307                         kfree(mac);
7308                 }
7309
7310                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7311                         list_del(&mac->node);
7312                         kfree(mac);
7313                 }
7314         }
7315         mutex_unlock(&hdev->vport_cfg_mutex);
7316 }
7317
7318 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7319                                               u16 cmdq_resp, u8 resp_code)
7320 {
7321 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7322 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7323 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7324 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7325
7326         int return_status;
7327
7328         if (cmdq_resp) {
7329                 dev_err(&hdev->pdev->dev,
7330                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7331                         cmdq_resp);
7332                 return -EIO;
7333         }
7334
7335         switch (resp_code) {
7336         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7337         case HCLGE_ETHERTYPE_ALREADY_ADD:
7338                 return_status = 0;
7339                 break;
7340         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7341                 dev_err(&hdev->pdev->dev,
7342                         "add mac ethertype failed for manager table overflow.\n");
7343                 return_status = -EIO;
7344                 break;
7345         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7346                 dev_err(&hdev->pdev->dev,
7347                         "add mac ethertype failed for key conflict.\n");
7348                 return_status = -EIO;
7349                 break;
7350         default:
7351                 dev_err(&hdev->pdev->dev,
7352                         "add mac ethertype failed for undefined, code=%d.\n",
7353                         resp_code);
7354                 return_status = -EIO;
7355         }
7356
7357         return return_status;
7358 }
7359
7360 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7361                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7362 {
7363         struct hclge_desc desc;
7364         u8 resp_code;
7365         u16 retval;
7366         int ret;
7367
7368         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7369         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7370
7371         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7372         if (ret) {
7373                 dev_err(&hdev->pdev->dev,
7374                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7375                         ret);
7376                 return ret;
7377         }
7378
7379         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7380         retval = le16_to_cpu(desc.retval);
7381
7382         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7383 }
7384
7385 static int init_mgr_tbl(struct hclge_dev *hdev)
7386 {
7387         int ret;
7388         int i;
7389
7390         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7391                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7392                 if (ret) {
7393                         dev_err(&hdev->pdev->dev,
7394                                 "add mac ethertype failed, ret =%d.\n",
7395                                 ret);
7396                         return ret;
7397                 }
7398         }
7399
7400         return 0;
7401 }
7402
7403 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7404 {
7405         struct hclge_vport *vport = hclge_get_vport(handle);
7406         struct hclge_dev *hdev = vport->back;
7407
7408         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7409 }
7410
7411 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7412                               bool is_first)
7413 {
7414         const unsigned char *new_addr = (const unsigned char *)p;
7415         struct hclge_vport *vport = hclge_get_vport(handle);
7416         struct hclge_dev *hdev = vport->back;
7417         int ret;
7418
7419         /* mac addr check */
7420         if (is_zero_ether_addr(new_addr) ||
7421             is_broadcast_ether_addr(new_addr) ||
7422             is_multicast_ether_addr(new_addr)) {
7423                 dev_err(&hdev->pdev->dev,
7424                         "Change uc mac err! invalid mac:%pM.\n",
7425                          new_addr);
7426                 return -EINVAL;
7427         }
7428
7429         if ((!is_first || is_kdump_kernel()) &&
7430             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7431                 dev_warn(&hdev->pdev->dev,
7432                          "remove old uc mac address fail.\n");
7433
7434         ret = hclge_add_uc_addr(handle, new_addr);
7435         if (ret) {
7436                 dev_err(&hdev->pdev->dev,
7437                         "add uc mac address fail, ret =%d.\n",
7438                         ret);
7439
7440                 if (!is_first &&
7441                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7442                         dev_err(&hdev->pdev->dev,
7443                                 "restore uc mac address fail.\n");
7444
7445                 return -EIO;
7446         }
7447
7448         ret = hclge_pause_addr_cfg(hdev, new_addr);
7449         if (ret) {
7450                 dev_err(&hdev->pdev->dev,
7451                         "configure mac pause address fail, ret =%d.\n",
7452                         ret);
7453                 return -EIO;
7454         }
7455
7456         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7457
7458         return 0;
7459 }
7460
7461 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7462                           int cmd)
7463 {
7464         struct hclge_vport *vport = hclge_get_vport(handle);
7465         struct hclge_dev *hdev = vport->back;
7466
7467         if (!hdev->hw.mac.phydev)
7468                 return -EOPNOTSUPP;
7469
7470         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7471 }
7472
7473 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7474                                       u8 fe_type, bool filter_en, u8 vf_id)
7475 {
7476         struct hclge_vlan_filter_ctrl_cmd *req;
7477         struct hclge_desc desc;
7478         int ret;
7479
7480         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7481
7482         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7483         req->vlan_type = vlan_type;
7484         req->vlan_fe = filter_en ? fe_type : 0;
7485         req->vf_id = vf_id;
7486
7487         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7488         if (ret)
7489                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7490                         ret);
7491
7492         return ret;
7493 }
7494
7495 #define HCLGE_FILTER_TYPE_VF            0
7496 #define HCLGE_FILTER_TYPE_PORT          1
7497 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7498 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7499 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7500 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7501 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7502 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7503                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7504 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7505                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7506
7507 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7508 {
7509         struct hclge_vport *vport = hclge_get_vport(handle);
7510         struct hclge_dev *hdev = vport->back;
7511
7512         if (hdev->pdev->revision >= 0x21) {
7513                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7514                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7515                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7516                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7517         } else {
7518                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7519                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7520                                            0);
7521         }
7522         if (enable)
7523                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7524         else
7525                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7526 }
7527
7528 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7529                                     bool is_kill, u16 vlan,
7530                                     __be16 proto)
7531 {
7532 #define HCLGE_MAX_VF_BYTES  16
7533         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7534         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7535         struct hclge_desc desc[2];
7536         u8 vf_byte_val;
7537         u8 vf_byte_off;
7538         int ret;
7539
7540         /* if the vf vlan table is full, firmware disables the vf vlan filter;
7541          * it is then neither possible nor necessary to add a new vlan id
7542          */
7543         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7544                 return 0;
7545
7546         hclge_cmd_setup_basic_desc(&desc[0],
7547                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7548         hclge_cmd_setup_basic_desc(&desc[1],
7549                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7550
7551         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7552
7553         vf_byte_off = vfid / 8;
7554         vf_byte_val = 1 << (vfid % 8);
7555
7556         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7557         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7558
7559         req0->vlan_id  = cpu_to_le16(vlan);
7560         req0->vlan_cfg = is_kill;
7561
7562         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7563                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7564         else
7565                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
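
             /* Worked example (illustrative vfid): for vfid = 21,
              * vf_byte_off = 21 / 8 = 2 and vf_byte_val = 1 << (21 % 8) =
              * 0x20, so bit 5 of byte 2 in req0->vf_bitmap is set; vfids of
              * 128 and above (vf_byte_off >= HCLGE_MAX_VF_BYTES) spill into
              * req1->vf_bitmap.
              */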
7566
7567         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7568         if (ret) {
7569                 dev_err(&hdev->pdev->dev,
7570                         "Send vf vlan command fail, ret =%d.\n",
7571                         ret);
7572                 return ret;
7573         }
7574
7575         if (!is_kill) {
7576 #define HCLGE_VF_VLAN_NO_ENTRY  2
7577                 if (!req0->resp_code || req0->resp_code == 1)
7578                         return 0;
7579
7580                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7581                         set_bit(vfid, hdev->vf_vlan_full);
7582                         dev_warn(&hdev->pdev->dev,
7583                                  "vf vlan table is full, vf vlan filter is disabled\n");
7584                         return 0;
7585                 }
7586
7587                 dev_err(&hdev->pdev->dev,
7588                         "Add vf vlan filter fail, ret =%d.\n",
7589                         req0->resp_code);
7590         } else {
7591 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7592                 if (!req0->resp_code)
7593                         return 0;
7594
7595                 /* vf vlan filter is disabled when the vf vlan table is
7596                  * full, so new vlan ids are never added into the table.
7597                  * Just return 0 without warning, to avoid a flood of
7598                  * verbose logs on unload.
7599                  */
7600                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7601                         return 0;
7602
7603                 dev_err(&hdev->pdev->dev,
7604                         "Kill vf vlan filter fail, ret =%d.\n",
7605                         req0->resp_code);
7606         }
7607
7608         return -EIO;
7609 }
7610
7611 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7612                                       u16 vlan_id, bool is_kill)
7613 {
7614         struct hclge_vlan_filter_pf_cfg_cmd *req;
7615         struct hclge_desc desc;
7616         u8 vlan_offset_byte_val;
7617         u8 vlan_offset_byte;
7618         u8 vlan_offset_160;
7619         int ret;
7620
7621         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7622
7623         vlan_offset_160 = vlan_id / 160;
7624         vlan_offset_byte = (vlan_id % 160) / 8;
7625         vlan_offset_byte_val = 1 << (vlan_id % 8);
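
             /* Worked example (illustrative vlan_id): for vlan_id = 1000,
              * vlan_offset_160 = 1000 / 160 = 6, vlan_offset_byte =
              * (1000 % 160) / 8 = 5 and vlan_offset_byte_val =
              * 1 << (1000 % 8) = 0x01, i.e. bit 0 of byte 5 within the
              * seventh 160-vlan block of the port vlan bitmap.
              */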
7626
7627         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7628         req->vlan_offset = vlan_offset_160;
7629         req->vlan_cfg = is_kill;
7630         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7631
7632         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7633         if (ret)
7634                 dev_err(&hdev->pdev->dev,
7635                         "port vlan command, send fail, ret =%d.\n", ret);
7636         return ret;
7637 }
7638
7639 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7640                                     u16 vport_id, u16 vlan_id,
7641                                     bool is_kill)
7642 {
7643         u16 vport_idx, vport_num = 0;
7644         int ret;
7645
7646         if (is_kill && !vlan_id)
7647                 return 0;
7648
7649         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7650                                        proto);
7651         if (ret) {
7652                 dev_err(&hdev->pdev->dev,
7653                         "Set %d vport vlan filter config fail, ret =%d.\n",
7654                         vport_id, ret);
7655                 return ret;
7656         }
7657
7658         /* vlan 0 may be added twice when 8021q module is enabled */
7659         if (!is_kill && !vlan_id &&
7660             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7661                 return 0;
7662
7663         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7664                 dev_err(&hdev->pdev->dev,
7665                         "Add port vlan failed, vport %d is already in vlan %d\n",
7666                         vport_id, vlan_id);
7667                 return -EINVAL;
7668         }
7669
7670         if (is_kill &&
7671             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7672                 dev_err(&hdev->pdev->dev,
7673                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7674                         vport_id, vlan_id);
7675                 return -EINVAL;
7676         }
7677
7678         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7679                 vport_num++;
7680
7681         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7682                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7683                                                  is_kill);
7684
7685         return ret;
7686 }
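
     /* Note on the vport_num check above (derived from the code): the shared
      * port vlan filter is programmed only when the first vport joins a vlan
      * (vport_num == 1 after setting the bit) and cleared only when the last
      * one leaves (vport_num == 0 after clearing the bit); joins and leaves
      * in between touch only the per-vf vlan table.
      */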
7687
7688 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7689 {
7690         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7691         struct hclge_vport_vtag_tx_cfg_cmd *req;
7692         struct hclge_dev *hdev = vport->back;
7693         struct hclge_desc desc;
7694         u16 bmap_index;
7695         int status;
7696
7697         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7698
7699         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7700         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7701         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7702         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7703                       vcfg->accept_tag1 ? 1 : 0);
7704         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7705                       vcfg->accept_untag1 ? 1 : 0);
7706         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7707                       vcfg->accept_tag2 ? 1 : 0);
7708         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7709                       vcfg->accept_untag2 ? 1 : 0);
7710         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7711                       vcfg->insert_tag1_en ? 1 : 0);
7712         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7713                       vcfg->insert_tag2_en ? 1 : 0);
7714         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7715
7716         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7717         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7718                         HCLGE_VF_NUM_PER_BYTE;
7719         req->vf_bitmap[bmap_index] =
7720                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
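
             /* Worked example (assuming HCLGE_VF_NUM_PER_CMD = 64 and
              * HCLGE_VF_NUM_PER_BYTE = 8, as defined in this driver's
              * headers): for vport_id = 21, vf_offset = 21 / 64 = 0,
              * bmap_index = (21 % 64) / 8 = 2, and vf_bitmap[2] =
              * 1 << (21 % 8) = 0x20.
              */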
7721
7722         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7723         if (status)
7724                 dev_err(&hdev->pdev->dev,
7725                         "Send port txvlan cfg command fail, ret =%d\n",
7726                         status);
7727
7728         return status;
7729 }
7730
7731 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7732 {
7733         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7734         struct hclge_vport_vtag_rx_cfg_cmd *req;
7735         struct hclge_dev *hdev = vport->back;
7736         struct hclge_desc desc;
7737         u16 bmap_index;
7738         int status;
7739
7740         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7741
7742         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7743         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7744                       vcfg->strip_tag1_en ? 1 : 0);
7745         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7746                       vcfg->strip_tag2_en ? 1 : 0);
7747         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7748                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7749         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7750                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7751
7752         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7753         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7754                         HCLGE_VF_NUM_PER_BYTE;
7755         req->vf_bitmap[bmap_index] =
7756                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7757
7758         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7759         if (status)
7760                 dev_err(&hdev->pdev->dev,
7761                         "Send port rxvlan cfg command fail, ret =%d\n",
7762                         status);
7763
7764         return status;
7765 }
7766
7767 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7768                                   u16 port_base_vlan_state,
7769                                   u16 vlan_tag)
7770 {
7771         int ret;
7772
7773         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7774                 vport->txvlan_cfg.accept_tag1 = true;
7775                 vport->txvlan_cfg.insert_tag1_en = false;
7776                 vport->txvlan_cfg.default_tag1 = 0;
7777         } else {
7778                 vport->txvlan_cfg.accept_tag1 = false;
7779                 vport->txvlan_cfg.insert_tag1_en = true;
7780                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7781         }
7782
7783         vport->txvlan_cfg.accept_untag1 = true;
7784
7785         /* accept_tag2 and accept_untag2 are not supported on
7786          * pdev revision 0x20; newer revisions support them, but
7787          * these two fields can not be configured by the user.
7788          */
7789         vport->txvlan_cfg.accept_tag2 = true;
7790         vport->txvlan_cfg.accept_untag2 = true;
7791         vport->txvlan_cfg.insert_tag2_en = false;
7792         vport->txvlan_cfg.default_tag2 = 0;
7793
7794         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7795                 vport->rxvlan_cfg.strip_tag1_en = false;
7796                 vport->rxvlan_cfg.strip_tag2_en =
7797                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7798         } else {
7799                 vport->rxvlan_cfg.strip_tag1_en =
7800                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7801                 vport->rxvlan_cfg.strip_tag2_en = true;
7802         }
7803         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7804         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7805
7806         ret = hclge_set_vlan_tx_offload_cfg(vport);
7807         if (ret)
7808                 return ret;
7809
7810         return hclge_set_vlan_rx_offload_cfg(vport);
7811 }
7812
7813 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7814 {
7815         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7816         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7817         struct hclge_desc desc;
7818         int status;
7819
7820         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7821         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7822         rx_req->ot_fst_vlan_type =
7823                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7824         rx_req->ot_sec_vlan_type =
7825                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7826         rx_req->in_fst_vlan_type =
7827                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7828         rx_req->in_sec_vlan_type =
7829                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7830
7831         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7832         if (status) {
7833                 dev_err(&hdev->pdev->dev,
7834                         "Send rxvlan protocol type command fail, ret =%d\n",
7835                         status);
7836                 return status;
7837         }
7838
7839         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7840
7841         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7842         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7843         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7844
7845         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7846         if (status)
7847                 dev_err(&hdev->pdev->dev,
7848                         "Send txvlan protocol type command fail, ret =%d\n",
7849                         status);
7850
7851         return status;
7852 }
7853
7854 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7855 {
7856 #define HCLGE_DEF_VLAN_TYPE             0x8100
7857
7858         struct hnae3_handle *handle = &hdev->vport[0].nic;
7859         struct hclge_vport *vport;
7860         int ret;
7861         int i;
7862
7863         if (hdev->pdev->revision >= 0x21) {
7864                 /* for revision 0x21, vf vlan filter is per function */
7865                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7866                         vport = &hdev->vport[i];
7867                         ret = hclge_set_vlan_filter_ctrl(hdev,
7868                                                          HCLGE_FILTER_TYPE_VF,
7869                                                          HCLGE_FILTER_FE_EGRESS,
7870                                                          true,
7871                                                          vport->vport_id);
7872                         if (ret)
7873                                 return ret;
7874                 }
7875
7876                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7877                                                  HCLGE_FILTER_FE_INGRESS, true,
7878                                                  0);
7879                 if (ret)
7880                         return ret;
7881         } else {
7882                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7883                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7884                                                  true, 0);
7885                 if (ret)
7886                         return ret;
7887         }
7888
7889         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7890
7891         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7892         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7893         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7894         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7895         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7896         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7897
7898         ret = hclge_set_vlan_protocol_type(hdev);
7899         if (ret)
7900                 return ret;
7901
7902         for (i = 0; i < hdev->num_alloc_vport; i++) {
7903                 u16 vlan_tag;
7904
7905                 vport = &hdev->vport[i];
7906                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7907
7908                 ret = hclge_vlan_offload_cfg(vport,
7909                                              vport->port_base_vlan_cfg.state,
7910                                              vlan_tag);
7911                 if (ret)
7912                         return ret;
7913         }
7914
7915         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7916 }
7917
7918 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7919                                        bool writen_to_tbl)
7920 {
7921         struct hclge_vport_vlan_cfg *vlan;
7922
7923         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7924         if (!vlan)
7925                 return;
7926
7927         vlan->hd_tbl_status = writen_to_tbl;
7928         vlan->vlan_id = vlan_id;
7929
7930         list_add_tail(&vlan->node, &vport->vlan_list);
7931 }
7932
7933 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7934 {
7935         struct hclge_vport_vlan_cfg *vlan, *tmp;
7936         struct hclge_dev *hdev = vport->back;
7937         int ret;
7938
7939         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7940                 if (!vlan->hd_tbl_status) {
7941                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7942                                                        vport->vport_id,
7943                                                        vlan->vlan_id, false);
7944                         if (ret) {
7945                                 dev_err(&hdev->pdev->dev,
7946                                         "restore vport vlan list failed, ret=%d\n",
7947                                         ret);
7948                                 return ret;
7949                         }
7950                 }
7951                 vlan->hd_tbl_status = true;
7952         }
7953
7954         return 0;
7955 }
7956
7957 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7958                                       bool is_write_tbl)
7959 {
7960         struct hclge_vport_vlan_cfg *vlan, *tmp;
7961         struct hclge_dev *hdev = vport->back;
7962
7963         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7964                 if (vlan->vlan_id == vlan_id) {
7965                         if (is_write_tbl && vlan->hd_tbl_status)
7966                                 hclge_set_vlan_filter_hw(hdev,
7967                                                          htons(ETH_P_8021Q),
7968                                                          vport->vport_id,
7969                                                          vlan_id,
7970                                                          true);
7971
7972                         list_del(&vlan->node);
7973                         kfree(vlan);
7974                         break;
7975                 }
7976         }
7977 }
7978
7979 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7980 {
7981         struct hclge_vport_vlan_cfg *vlan, *tmp;
7982         struct hclge_dev *hdev = vport->back;
7983
7984         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7985                 if (vlan->hd_tbl_status)
7986                         hclge_set_vlan_filter_hw(hdev,
7987                                                  htons(ETH_P_8021Q),
7988                                                  vport->vport_id,
7989                                                  vlan->vlan_id,
7990                                                  true);
7991
7992                 vlan->hd_tbl_status = false;
7993                 if (is_del_list) {
7994                         list_del(&vlan->node);
7995                         kfree(vlan);
7996                 }
7997         }
7998 }
7999
8000 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8001 {
8002         struct hclge_vport_vlan_cfg *vlan, *tmp;
8003         struct hclge_vport *vport;
8004         int i;
8005
8006         mutex_lock(&hdev->vport_cfg_mutex);
8007         for (i = 0; i < hdev->num_alloc_vport; i++) {
8008                 vport = &hdev->vport[i];
8009                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8010                         list_del(&vlan->node);
8011                         kfree(vlan);
8012                 }
8013         }
8014         mutex_unlock(&hdev->vport_cfg_mutex);
8015 }
8016
8017 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8018 {
8019         struct hclge_vport *vport = hclge_get_vport(handle);
8020         struct hclge_vport_vlan_cfg *vlan, *tmp;
8021         struct hclge_dev *hdev = vport->back;
8022         u16 vlan_proto;
8023         u16 state, vlan_id;
8024         int i;
8025
8026         mutex_lock(&hdev->vport_cfg_mutex);
8027         for (i = 0; i < hdev->num_alloc_vport; i++) {
8028                 vport = &hdev->vport[i];
8029                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8030                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8031                 state = vport->port_base_vlan_cfg.state;
8032
8033                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8034                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8035                                                  vport->vport_id, vlan_id,
8036                                                  false);
8037                         continue;
8038                 }
8039
8040                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8041                         if (vlan->hd_tbl_status)
8042                                 hclge_set_vlan_filter_hw(hdev,
8043                                                          htons(ETH_P_8021Q),
8044                                                          vport->vport_id,
8045                                                          vlan->vlan_id,
8046                                                          false);
8047                 }
8048         }
8049
8050         mutex_unlock(&hdev->vport_cfg_mutex);
8051 }
8052
8053 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8054 {
8055         struct hclge_vport *vport = hclge_get_vport(handle);
8056
8057         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8058                 vport->rxvlan_cfg.strip_tag1_en = false;
8059                 vport->rxvlan_cfg.strip_tag2_en = enable;
8060         } else {
8061                 vport->rxvlan_cfg.strip_tag1_en = enable;
8062                 vport->rxvlan_cfg.strip_tag2_en = true;
8063         }
8064         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8065         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8066         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8067
8068         return hclge_set_vlan_rx_offload_cfg(vport);
8069 }
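
     /* Note (derived from the code above and hclge_vlan_offload_cfg()): when
      * port based vlan is disabled, tag1 stripping stays off and the rx
      * offload toggle drives tag2; when it is enabled, tag2 is always
      * stripped and the toggle drives tag1 instead.
      */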
8070
8071 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8072                                             u16 port_base_vlan_state,
8073                                             struct hclge_vlan_info *new_info,
8074                                             struct hclge_vlan_info *old_info)
8075 {
8076         struct hclge_dev *hdev = vport->back;
8077         int ret;
8078
8079         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8080                 hclge_rm_vport_all_vlan_table(vport, false);
8081                 return hclge_set_vlan_filter_hw(hdev,
8082                                                  htons(new_info->vlan_proto),
8083                                                  vport->vport_id,
8084                                                  new_info->vlan_tag,
8085                                                  false);
8086         }
8087
8088         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8089                                        vport->vport_id, old_info->vlan_tag,
8090                                        true);
8091         if (ret)
8092                 return ret;
8093
8094         return hclge_add_vport_all_vlan_table(vport);
8095 }
8096
8097 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8098                                     struct hclge_vlan_info *vlan_info)
8099 {
8100         struct hnae3_handle *nic = &vport->nic;
8101         struct hclge_vlan_info *old_vlan_info;
8102         struct hclge_dev *hdev = vport->back;
8103         int ret;
8104
8105         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8106
8107         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8108         if (ret)
8109                 return ret;
8110
8111         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8112                 /* add new VLAN tag */
8113                 ret = hclge_set_vlan_filter_hw(hdev,
8114                                                htons(vlan_info->vlan_proto),
8115                                                vport->vport_id,
8116                                                vlan_info->vlan_tag,
8117                                                false);
8118                 if (ret)
8119                         return ret;
8120
8121                 /* remove old VLAN tag */
8122                 ret = hclge_set_vlan_filter_hw(hdev,
8123                                                htons(old_vlan_info->vlan_proto),
8124                                                vport->vport_id,
8125                                                old_vlan_info->vlan_tag,
8126                                                true);
8127                 if (ret)
8128                         return ret;
8129
8130                 goto update;
8131         }
8132
8133         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8134                                                old_vlan_info);
8135         if (ret)
8136                 return ret;
8137
8138         /* update state only when disabling/enabling port based VLAN */
8139         vport->port_base_vlan_cfg.state = state;
8140         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8141                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8142         else
8143                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8144
8145 update:
8146         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8147         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8148         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8149
8150         return 0;
8151 }
8152
8153 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8154                                           enum hnae3_port_base_vlan_state state,
8155                                           u16 vlan)
8156 {
8157         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8158                 if (!vlan)
8159                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8160                 else
8161                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8162         } else {
8163                 if (!vlan)
8164                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8165                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8166                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8167                 else
8168                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8169         }
8170 }
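
     /* Decision table for the helper above (derived from the code):
      *
      *   current state   requested vlan   result
      *   DISABLE         0                NOCHANGE
      *   DISABLE         non-zero         ENABLE
      *   ENABLE          0                DISABLE
      *   ENABLE          same tag         NOCHANGE
      *   ENABLE          different tag    MODIFY
      */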
8171
8172 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8173                                     u16 vlan, u8 qos, __be16 proto)
8174 {
8175         struct hclge_vport *vport = hclge_get_vport(handle);
8176         struct hclge_dev *hdev = vport->back;
8177         struct hclge_vlan_info vlan_info;
8178         u16 state;
8179         int ret;
8180
8181         if (hdev->pdev->revision == 0x20)
8182                 return -EOPNOTSUPP;
8183
8184         /* qos is a 3-bit value, so it can not be bigger than 7 */
8185         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8186                 return -EINVAL;
8187         if (proto != htons(ETH_P_8021Q))
8188                 return -EPROTONOSUPPORT;
8189
8190         vport = &hdev->vport[vfid];
8191         state = hclge_get_port_base_vlan_state(vport,
8192                                                vport->port_base_vlan_cfg.state,
8193                                                vlan);
8194         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8195                 return 0;
8196
8197         vlan_info.vlan_tag = vlan;
8198         vlan_info.qos = qos;
8199         vlan_info.vlan_proto = ntohs(proto);
8200
8201         /* update port based VLAN for PF */
8202         if (!vfid) {
8203                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8204                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8205                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8206
8207                 return ret;
8208         }
8209
8210         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8211                 return hclge_update_port_base_vlan_cfg(vport, state,
8212                                                        &vlan_info);
8213         } else {
8214                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8215                                                         (u8)vfid, state,
8216                                                         vlan, qos,
8217                                                         ntohs(proto));
8218                 return ret;
8219         }
8220 }
8221
8222 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8223                           u16 vlan_id, bool is_kill)
8224 {
8225         struct hclge_vport *vport = hclge_get_vport(handle);
8226         struct hclge_dev *hdev = vport->back;
8227         bool writen_to_tbl = false;
8228         int ret = 0;
8229
8230         /* When the device is resetting, firmware is unable to handle
8231          * mailbox messages. Just record the vlan id, and remove it
8232          * after the reset has finished.
8233          */
8234         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8235                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8236                 return -EBUSY;
8237         }
8238
8239         /* when port based vlan is enabled, we use the port based vlan as
8240          * the vlan filter entry. In this case, we don't update the vlan
8241          * filter table when the user adds or removes a vlan, only the
8242          * vport vlan list. The vlan ids in the vlan list are not written
8243          * to the vlan filter table until port based vlan is disabled
8244          */
8245         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8246                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8247                                                vlan_id, is_kill);
8248                 writen_to_tbl = true;
8249         }
8250
8251         if (!ret) {
8252                 if (is_kill)
8253                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8254                 else
8255                         hclge_add_vport_vlan_table(vport, vlan_id,
8256                                                    writen_to_tbl);
8257         } else if (is_kill) {
8258                 /* when removing the hw vlan filter failed, record the vlan
8259                  * id, and try to remove it from hw later, to stay consistent
8260                  * with the stack
8261                  */
8262                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8263         }
8264         return ret;
8265 }
8266
8267 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8268 {
8269 #define HCLGE_MAX_SYNC_COUNT    60
8270
8271         int i, ret, sync_cnt = 0;
8272         u16 vlan_id;
8273
8274         /* the PF (vport 0) is always alive; sync every vport's failed deletions */
8275         for (i = 0; i < hdev->num_alloc_vport; i++) {
8276                 struct hclge_vport *vport = &hdev->vport[i];
8277
8278                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8279                                          VLAN_N_VID);
8280                 while (vlan_id != VLAN_N_VID) {
8281                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8282                                                        vport->vport_id, vlan_id,
8283                                                        true);
8284                         if (ret && ret != -EINVAL)
8285                                 return;
8286
8287                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8288                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8289
8290                         sync_cnt++;
8291                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8292                                 return;
8293
8294                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8295                                                  VLAN_N_VID);
8296                 }
8297         }
8298 }
8299
8300 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8301 {
8302         struct hclge_config_max_frm_size_cmd *req;
8303         struct hclge_desc desc;
8304
8305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8306
8307         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8308         req->max_frm_size = cpu_to_le16(new_mps);
8309         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8310
8311         return hclge_cmd_send(&hdev->hw, &desc, 1);
8312 }
8313
8314 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8315 {
8316         struct hclge_vport *vport = hclge_get_vport(handle);
8317
8318         return hclge_set_vport_mtu(vport, new_mtu);
8319 }
8320
8321 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8322 {
8323         struct hclge_dev *hdev = vport->back;
8324         int i, max_frm_size, ret;
8325
8326         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8327         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8328             max_frm_size > HCLGE_MAC_MAX_FRAME)
8329                 return -EINVAL;
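
             /* Worked example (illustrative mtu): for new_mtu = 1500,
              * max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
              * 2 * VLAN_HLEN (2 * 4) = 1526 bytes, which must fall within
              * [HCLGE_MAC_MIN_FRAME, HCLGE_MAC_MAX_FRAME].
              */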
8330
8331         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8332         mutex_lock(&hdev->vport_lock);
8333         /* VF's mps must fit within hdev->mps */
8334         if (vport->vport_id && max_frm_size > hdev->mps) {
8335                 mutex_unlock(&hdev->vport_lock);
8336                 return -EINVAL;
8337         } else if (vport->vport_id) {
8338                 vport->mps = max_frm_size;
8339                 mutex_unlock(&hdev->vport_lock);
8340                 return 0;
8341         }
8342
8343         /* PF's mps must not be less than any VF's mps */
8344         for (i = 1; i < hdev->num_alloc_vport; i++)
8345                 if (max_frm_size < hdev->vport[i].mps) {
8346                         mutex_unlock(&hdev->vport_lock);
8347                         return -EINVAL;
8348                 }
8349
8350         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8351
8352         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8353         if (ret) {
8354                 dev_err(&hdev->pdev->dev,
8355                         "Change mtu fail, ret =%d\n", ret);
8356                 goto out;
8357         }
8358
8359         hdev->mps = max_frm_size;
8360         vport->mps = max_frm_size;
8361
8362         ret = hclge_buffer_alloc(hdev);
8363         if (ret)
8364                 dev_err(&hdev->pdev->dev,
8365                         "Allocate buffer fail, ret =%d\n", ret);
8366
8367 out:
8368         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8369         mutex_unlock(&hdev->vport_lock);
8370         return ret;
8371 }
8372
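/* A TQP reset is a three-step handshake with the firmware: assert the
 * per-queue reset bit (enable = true), poll the ready bit via read
 * variants of HCLGE_OPC_RESET_TQP_QUEUE, then deassert the bit again.
 * hclge_reset_tqp() and hclge_reset_vf_queue() below drive this sequence.
 */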
8373 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8374                                     bool enable)
8375 {
8376         struct hclge_reset_tqp_queue_cmd *req;
8377         struct hclge_desc desc;
8378         int ret;
8379
8380         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8381
8382         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8383         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8384         if (enable)
8385                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8386
8387         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8388         if (ret) {
8389                 dev_err(&hdev->pdev->dev,
8390                         "Send tqp reset cmd error, status =%d\n", ret);
8391                 return ret;
8392         }
8393
8394         return 0;
8395 }
8396
8397 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8398 {
8399         struct hclge_reset_tqp_queue_cmd *req;
8400         struct hclge_desc desc;
8401         int ret;
8402
8403         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8404
8405         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8406         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8407
8408         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8409         if (ret) {
8410                 dev_err(&hdev->pdev->dev,
8411                         "Get reset status error, status =%d\n", ret);
8412                 return ret;
8413         }
8414
8415         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8416 }
8417
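/* Map a handle-local queue id to the device-global TQP index.  ("covert"
 * in the name is apparently a typo for "convert"; the identifier is kept
 * as-is to match its callers in the rest of the driver.)
 */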
8418 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8419 {
8420         struct hnae3_queue *queue;
8421         struct hclge_tqp *tqp;
8422
8423         queue = handle->kinfo.tqp[queue_id];
8424         tqp = container_of(queue, struct hclge_tqp, q);
8425
8426         return tqp->index;
8427 }
8428
8429 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8430 {
8431         struct hclge_vport *vport = hclge_get_vport(handle);
8432         struct hclge_dev *hdev = vport->back;
8433         int reset_try_times = 0;
8434         int reset_status;
8435         u16 queue_gid;
8436         int ret;
8437
8438         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8439
8440         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8441         if (ret) {
8442                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8443                 return ret;
8444         }
8445
8446         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8447         if (ret) {
8448                 dev_err(&hdev->pdev->dev,
8449                         "Send reset tqp cmd fail, ret = %d\n", ret);
8450                 return ret;
8451         }
8452
8453         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8454                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8455                 if (reset_status)
8456                         break;
8457
8458                 /* Wait for tqp hw reset */
8459                 usleep_range(1000, 1200);
8460         }
8461
8462         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8463                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8464                 return -ETIME; /* polling timed out; ret is 0 here */
8465         }
8466
8467         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8468         if (ret)
8469                 dev_err(&hdev->pdev->dev,
8470                         "Deassert the soft reset fail, ret = %d\n", ret);
8471
8472         return ret;
8473 }
8474
8475 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8476 {
8477         struct hclge_dev *hdev = vport->back;
8478         int reset_try_times = 0;
8479         int reset_status;
8480         u16 queue_gid;
8481         int ret;
8482
8483         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8484
8485         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8486         if (ret) {
8487                 dev_warn(&hdev->pdev->dev,
8488                          "Send reset tqp cmd fail, ret = %d\n", ret);
8489                 return;
8490         }
8491
8492         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8493                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8494                 if (reset_status)
8495                         break;
8496
8497                 /* Wait for tqp hw reset */
8498                 usleep_range(1000, 1200);
8499         }
8500
8501         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8502                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8503                 return;
8504         }
8505
8506         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8507         if (ret)
8508                 dev_warn(&hdev->pdev->dev,
8509                          "Deassert the soft reset fail, ret = %d\n", ret);
8510 }
8511
8512 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8513 {
8514         struct hclge_vport *vport = hclge_get_vport(handle);
8515         struct hclge_dev *hdev = vport->back;
8516
8517         return hdev->fw_version;
8518 }
8519
8520 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8521 {
8522         struct phy_device *phydev = hdev->hw.mac.phydev;
8523
8524         if (!phydev)
8525                 return;
8526
8527         phy_set_asym_pause(phydev, rx_en, tx_en);
8528 }
8529
8530 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8531 {
8532         int ret;
8533
8534         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8535                 return 0;
8536
8537         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8538         if (ret)
8539                 dev_err(&hdev->pdev->dev,
8540                         "configure pauseparam error, ret = %d.\n", ret);
8541
8542         return ret;
8543 }
8544
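/* Resolve the MAC pause configuration from the PHY autoneg outcome: the
 * local and link-partner advertisements are fed to
 * mii_resolve_flowctrl_fdx().  If, for instance, both ends advertise
 * symmetric pause, the result enables both FLOW_CTRL_TX and FLOW_CTRL_RX.
 * Half duplex never uses MAC pause.
 */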
8545 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8546 {
8547         struct phy_device *phydev = hdev->hw.mac.phydev;
8548         u16 remote_advertising = 0;
8549         u16 local_advertising;
8550         u32 rx_pause, tx_pause;
8551         u8 flowctl;
8552
8553         if (!phydev->link || !phydev->autoneg)
8554                 return 0;
8555
8556         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8557
8558         if (phydev->pause)
8559                 remote_advertising = LPA_PAUSE_CAP;
8560
8561         if (phydev->asym_pause)
8562                 remote_advertising |= LPA_PAUSE_ASYM;
8563
8564         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8565                                            remote_advertising);
8566         tx_pause = flowctl & FLOW_CTRL_TX;
8567         rx_pause = flowctl & FLOW_CTRL_RX;
8568
8569         if (phydev->duplex == HCLGE_MAC_HALF) {
8570                 tx_pause = 0;
8571                 rx_pause = 0;
8572         }
8573
8574         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8575 }
8576
8577 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8578                                  u32 *rx_en, u32 *tx_en)
8579 {
8580         struct hclge_vport *vport = hclge_get_vport(handle);
8581         struct hclge_dev *hdev = vport->back;
8582         struct phy_device *phydev = hdev->hw.mac.phydev;
8583
8584         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8585
8586         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8587                 *rx_en = 0;
8588                 *tx_en = 0;
8589                 return;
8590         }
8591
8592         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8593                 *rx_en = 1;
8594                 *tx_en = 0;
8595         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8596                 *tx_en = 1;
8597                 *rx_en = 0;
8598         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8599                 *rx_en = 1;
8600                 *tx_en = 1;
8601         } else {
8602                 *rx_en = 0;
8603                 *tx_en = 0;
8604         }
8605 }
8606
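/* Record the user-requested pause setting as the flow control mode:
 * rx_en && tx_en -> HCLGE_FC_FULL, rx only -> HCLGE_FC_RX_PAUSE,
 * tx only -> HCLGE_FC_TX_PAUSE, neither -> HCLGE_FC_NONE.
 */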
8607 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8608                                          u32 rx_en, u32 tx_en)
8609 {
8610         if (rx_en && tx_en)
8611                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8612         else if (rx_en && !tx_en)
8613                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8614         else if (!rx_en && tx_en)
8615                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8616         else
8617                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8618
8619         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8620 }
8621
8622 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8623                                 u32 rx_en, u32 tx_en)
8624 {
8625         struct hclge_vport *vport = hclge_get_vport(handle);
8626         struct hclge_dev *hdev = vport->back;
8627         struct phy_device *phydev = hdev->hw.mac.phydev;
8628         u32 fc_autoneg;
8629
8630         if (phydev) {
8631                 fc_autoneg = hclge_get_autoneg(handle);
8632                 if (auto_neg != fc_autoneg) {
8633                         dev_info(&hdev->pdev->dev,
8634                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8635                         return -EOPNOTSUPP;
8636                 }
8637         }
8638
8639         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8640                 dev_info(&hdev->pdev->dev,
8641                          "Priority flow control enabled. Cannot set link flow control.\n");
8642                 return -EOPNOTSUPP;
8643         }
8644
8645         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8646
8647         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8648
8649         if (!auto_neg)
8650                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8651
8652         if (phydev)
8653                 return phy_start_aneg(phydev);
8654
8655         return -EOPNOTSUPP;
8656 }
8657
8658 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8659                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8660 {
8661         struct hclge_vport *vport = hclge_get_vport(handle);
8662         struct hclge_dev *hdev = vport->back;
8663
8664         if (speed)
8665                 *speed = hdev->hw.mac.speed;
8666         if (duplex)
8667                 *duplex = hdev->hw.mac.duplex;
8668         if (auto_neg)
8669                 *auto_neg = hdev->hw.mac.autoneg;
8670 }
8671
8672 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8673                                  u8 *module_type)
8674 {
8675         struct hclge_vport *vport = hclge_get_vport(handle);
8676         struct hclge_dev *hdev = vport->back;
8677
8678         if (media_type)
8679                 *media_type = hdev->hw.mac.media_type;
8680
8681         if (module_type)
8682                 *module_type = hdev->hw.mac.module_type;
8683 }
8684
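/* Report the MDI/MDI-X state by reading the PHY's MDIX page registers.
 * The control field decodes as 0x0 = forced MDI, 0x1 = forced MDI-X and
 * 0x3 = auto; the status bit is only trusted once the PHY reports that
 * speed/duplex have been resolved.
 */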
8685 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8686                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8687 {
8688         struct hclge_vport *vport = hclge_get_vport(handle);
8689         struct hclge_dev *hdev = vport->back;
8690         struct phy_device *phydev = hdev->hw.mac.phydev;
8691         int mdix_ctrl, mdix, is_resolved;
8692         unsigned int retval;
8693
8694         if (!phydev) {
8695                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8696                 *tp_mdix = ETH_TP_MDI_INVALID;
8697                 return;
8698         }
8699
8700         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8701
8702         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8703         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8704                                     HCLGE_PHY_MDIX_CTRL_S);
8705
8706         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8707         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8708         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8709
8710         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8711
8712         switch (mdix_ctrl) {
8713         case 0x0:
8714                 *tp_mdix_ctrl = ETH_TP_MDI;
8715                 break;
8716         case 0x1:
8717                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8718                 break;
8719         case 0x3:
8720                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8721                 break;
8722         default:
8723                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8724                 break;
8725         }
8726
8727         if (!is_resolved)
8728                 *tp_mdix = ETH_TP_MDI_INVALID;
8729         else if (mdix)
8730                 *tp_mdix = ETH_TP_MDI_X;
8731         else
8732                 *tp_mdix = ETH_TP_MDI;
8733 }
8734
8735 static void hclge_info_show(struct hclge_dev *hdev)
8736 {
8737         struct device *dev = &hdev->pdev->dev;
8738
8739         dev_info(dev, "PF info begin:\n");
8740
8741         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8742         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8743         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8744         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8745         dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8746         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8747         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8748         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8749         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8750         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8751         dev_info(dev, "This is %s PF\n",
8752                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8753         dev_info(dev, "DCB %s\n",
8754                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8755         dev_info(dev, "MQPRIO %s\n",
8756                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8757
8758         dev_info(dev, "PF info end.\n");
8759 }
8760
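/* Client init can race with a concurrent reset.  The pattern here (and in
 * the roce variant below) is to snapshot rst_stats.reset_cnt before
 * init_instance() and, if a reset is in progress afterwards or the counter
 * has moved, roll the instance back and return -EBUSY so the caller can
 * retry.
 */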
8761 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8762                                           struct hclge_vport *vport)
8763 {
8764         struct hnae3_client *client = vport->nic.client;
8765         struct hclge_dev *hdev = ae_dev->priv;
8766         int rst_cnt;
8767         int ret;
8768
8769         rst_cnt = hdev->rst_stats.reset_cnt;
8770         ret = client->ops->init_instance(&vport->nic);
8771         if (ret)
8772                 return ret;
8773
8774         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8775         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8776             rst_cnt != hdev->rst_stats.reset_cnt) {
8777                 ret = -EBUSY;
8778                 goto init_nic_err;
8779         }
8780
8781         /* Enable nic hw error interrupts */
8782         ret = hclge_config_nic_hw_error(hdev, true);
8783         if (ret) {
8784                 dev_err(&ae_dev->pdev->dev,
8785                         "fail(%d) to enable hw error interrupts\n", ret);
8786                 goto init_nic_err;
8787         }
8788
8789         hnae3_set_client_init_flag(client, ae_dev, 1);
8790
8791         if (netif_msg_drv(&hdev->vport->nic))
8792                 hclge_info_show(hdev);
8793
8794         return ret;
8795
8796 init_nic_err:
8797         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8798         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8799                 msleep(HCLGE_WAIT_RESET_DONE);
8800
8801         client->ops->uninit_instance(&vport->nic, 0);
8802
8803         return ret;
8804 }
8805
8806 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8807                                            struct hclge_vport *vport)
8808 {
8809         struct hnae3_client *client = vport->roce.client;
8810         struct hclge_dev *hdev = ae_dev->priv;
8811         int rst_cnt;
8812         int ret;
8813
8814         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8815             !hdev->nic_client)
8816                 return 0;
8817
8818         client = hdev->roce_client;
8819         ret = hclge_init_roce_base_info(vport);
8820         if (ret)
8821                 return ret;
8822
8823         rst_cnt = hdev->rst_stats.reset_cnt;
8824         ret = client->ops->init_instance(&vport->roce);
8825         if (ret)
8826                 return ret;
8827
8828         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8829         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8830             rst_cnt != hdev->rst_stats.reset_cnt) {
8831                 ret = -EBUSY;
8832                 goto init_roce_err;
8833         }
8834
8835         /* Enable roce ras interrupts */
8836         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8837         if (ret) {
8838                 dev_err(&ae_dev->pdev->dev,
8839                         "fail(%d) to enable roce ras interrupts\n", ret);
8840                 goto init_roce_err;
8841         }
8842
8843         hnae3_set_client_init_flag(client, ae_dev, 1);
8844
8845         return 0;
8846
8847 init_roce_err:
8848         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8849         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8850                 msleep(HCLGE_WAIT_RESET_DONE);
8851
8852         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8853
8854         return ret;
8855 }
8856
8857 static int hclge_init_client_instance(struct hnae3_client *client,
8858                                       struct hnae3_ae_dev *ae_dev)
8859 {
8860         struct hclge_dev *hdev = ae_dev->priv;
8861         struct hclge_vport *vport;
8862         int i, ret;
8863
8864         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8865                 vport = &hdev->vport[i];
8866
8867                 switch (client->type) {
8868                 case HNAE3_CLIENT_KNIC:
8869
8870                         hdev->nic_client = client;
8871                         vport->nic.client = client;
8872                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8873                         if (ret)
8874                                 goto clear_nic;
8875
8876                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8877                         if (ret)
8878                                 goto clear_roce;
8879
8880                         break;
8881                 case HNAE3_CLIENT_ROCE:
8882                         if (hnae3_dev_roce_supported(hdev)) {
8883                                 hdev->roce_client = client;
8884                                 vport->roce.client = client;
8885                         }
8886
8887                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8888                         if (ret)
8889                                 goto clear_roce;
8890
8891                         break;
8892                 default:
8893                         return -EINVAL;
8894                 }
8895         }
8896
8897         return 0;
8898
8899 clear_nic:
8900         hdev->nic_client = NULL;
8901         vport->nic.client = NULL;
8902         return ret;
8903 clear_roce:
8904         hdev->roce_client = NULL;
8905         vport->roce.client = NULL;
8906         return ret;
8907 }
8908
8909 static void hclge_uninit_client_instance(struct hnae3_client *client,
8910                                          struct hnae3_ae_dev *ae_dev)
8911 {
8912         struct hclge_dev *hdev = ae_dev->priv;
8913         struct hclge_vport *vport;
8914         int i;
8915
8916         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8917                 vport = &hdev->vport[i];
8918                 if (hdev->roce_client) {
8919                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8920                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8921                                 msleep(HCLGE_WAIT_RESET_DONE);
8922
8923                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8924                                                                 0);
8925                         hdev->roce_client = NULL;
8926                         vport->roce.client = NULL;
8927                 }
8928                 if (client->type == HNAE3_CLIENT_ROCE)
8929                         return;
8930                 if (hdev->nic_client && client->ops->uninit_instance) {
8931                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8932                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8933                                 msleep(HCLGE_WAIT_RESET_DONE);
8934
8935                         client->ops->uninit_instance(&vport->nic, 0);
8936                         hdev->nic_client = NULL;
8937                         vport->nic.client = NULL;
8938                 }
8939         }
8940 }
8941
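/* Bring up the PCI function: enable the device, prefer a 64-bit DMA mask
 * but fall back to 32-bit with a warning, claim the regions and map BAR 2,
 * which provides the configuration register space used throughout this
 * file.
 */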
8942 static int hclge_pci_init(struct hclge_dev *hdev)
8943 {
8944         struct pci_dev *pdev = hdev->pdev;
8945         struct hclge_hw *hw;
8946         int ret;
8947
8948         ret = pci_enable_device(pdev);
8949         if (ret) {
8950                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8951                 return ret;
8952         }
8953
8954         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8955         if (ret) {
8956                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8957                 if (ret) {
8958                         dev_err(&pdev->dev,
8959                                 "can't set consistent PCI DMA\n");
8960                         goto err_disable_device;
8961                 }
8962                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8963         }
8964
8965         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8966         if (ret) {
8967                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8968                 goto err_disable_device;
8969         }
8970
8971         pci_set_master(pdev);
8972         hw = &hdev->hw;
8973         hw->io_base = pcim_iomap(pdev, 2, 0);
8974         if (!hw->io_base) {
8975                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8976                 ret = -ENOMEM;
8977                 goto err_clr_master;
8978         }
8979
8980         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8981
8982         return 0;
8983 err_clr_master:
8984         pci_clear_master(pdev);
8985         pci_release_regions(pdev);
8986 err_disable_device:
8987         pci_disable_device(pdev);
8988
8989         return ret;
8990 }
8991
8992 static void hclge_pci_uninit(struct hclge_dev *hdev)
8993 {
8994         struct pci_dev *pdev = hdev->pdev;
8995
8996         pcim_iounmap(pdev, hdev->hw.io_base);
8997         pci_free_irq_vectors(pdev);
8998         pci_clear_master(pdev);
8999         pci_release_mem_regions(pdev);
9000         pci_disable_device(pdev);
9001 }
9002
9003 static void hclge_state_init(struct hclge_dev *hdev)
9004 {
9005         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9006         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9007         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9008         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9009         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9010         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9011 }
9012
9013 static void hclge_state_uninit(struct hclge_dev *hdev)
9014 {
9015         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9016         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9017
9018         if (hdev->reset_timer.function)
9019                 del_timer_sync(&hdev->reset_timer);
9020         if (hdev->service_task.work.func)
9021                 cancel_delayed_work_sync(&hdev->service_task);
9022         if (hdev->rst_service_task.func)
9023                 cancel_work_sync(&hdev->rst_service_task);
9024         if (hdev->mbx_service_task.func)
9025                 cancel_work_sync(&hdev->mbx_service_task);
9026 }
9027
9028 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9029 {
9030 #define HCLGE_FLR_WAIT_MS       100
9031 #define HCLGE_FLR_WAIT_CNT      50
9032         struct hclge_dev *hdev = ae_dev->priv;
9033         int cnt = 0;
9034
9035         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9036         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9037         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9038         hclge_reset_event(hdev->pdev, NULL);
9039
9040         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9041                cnt++ < HCLGE_FLR_WAIT_CNT)
9042                 msleep(HCLGE_FLR_WAIT_MS);
9043
9044         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9045                 dev_err(&hdev->pdev->dev,
9046                         "flr wait down timeout: %d\n", cnt);
9047 }
9048
9049 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9050 {
9051         struct hclge_dev *hdev = ae_dev->priv;
9052
9053         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9054 }
9055
9056 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9057 {
9058         u16 i;
9059
9060         for (i = 0; i < hdev->num_alloc_vport; i++) {
9061                 struct hclge_vport *vport = &hdev->vport[i];
9062                 int ret;
9063
9064                 /* Send cmd to clear VF's FUNC_RST_ING */
9065                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9066                 if (ret)
9067                         dev_warn(&hdev->pdev->dev,
9068                                  "clear vf(%d) rst failed %d!\n",
9069                                  vport->vport_id, ret);
9070         }
9071 }
9072
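/* Main probe path for the PF.  Rough order: PCI and command queue
 * bring-up, capability/configuration query, MSI and misc IRQ setup,
 * TQP/vport allocation and mapping, then MAC, VLAN, TM, RSS and
 * flow-director initialization, and finally the service tasks and the
 * MISC vector.  Each error label below unwinds the steps completed
 * before it.
 */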
9073 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9074 {
9075         struct pci_dev *pdev = ae_dev->pdev;
9076         struct hclge_dev *hdev;
9077         int ret;
9078
9079         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9080         if (!hdev) {
9081                 ret = -ENOMEM;
9082                 goto out;
9083         }
9084
9085         hdev->pdev = pdev;
9086         hdev->ae_dev = ae_dev;
9087         hdev->reset_type = HNAE3_NONE_RESET;
9088         hdev->reset_level = HNAE3_FUNC_RESET;
9089         ae_dev->priv = hdev;
9090         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9091
9092         mutex_init(&hdev->vport_lock);
9093         mutex_init(&hdev->vport_cfg_mutex);
9094         spin_lock_init(&hdev->fd_rule_lock);
9095
9096         ret = hclge_pci_init(hdev);
9097         if (ret) {
9098                 dev_err(&pdev->dev, "PCI init failed\n");
9099                 goto out;
9100         }
9101
9102         /* Initialize the firmware command queue */
9103         ret = hclge_cmd_queue_init(hdev);
9104         if (ret) {
9105                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9106                 goto err_pci_uninit;
9107         }
9108
9109         /* Initialize the firmware command interface */
9110         ret = hclge_cmd_init(hdev);
9111         if (ret)
9112                 goto err_cmd_uninit;
9113
9114         ret = hclge_get_cap(hdev);
9115         if (ret) {
9116                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9117                         ret);
9118                 goto err_cmd_uninit;
9119         }
9120
9121         ret = hclge_configure(hdev);
9122         if (ret) {
9123                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9124                 goto err_cmd_uninit;
9125         }
9126
9127         ret = hclge_init_msi(hdev);
9128         if (ret) {
9129                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9130                 goto err_cmd_uninit;
9131         }
9132
9133         ret = hclge_misc_irq_init(hdev);
9134         if (ret) {
9135                 dev_err(&pdev->dev,
9136                         "Misc IRQ(vector0) init error, ret = %d.\n",
9137                         ret);
9138                 goto err_msi_uninit;
9139         }
9140
9141         ret = hclge_alloc_tqps(hdev);
9142         if (ret) {
9143                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9144                 goto err_msi_irq_uninit;
9145         }
9146
9147         ret = hclge_alloc_vport(hdev);
9148         if (ret) {
9149                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9150                 goto err_msi_irq_uninit;
9151         }
9152
9153         ret = hclge_map_tqp(hdev);
9154         if (ret) {
9155                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9156                 goto err_msi_irq_uninit;
9157         }
9158
9159         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9160                 ret = hclge_mac_mdio_config(hdev);
9161                 if (ret) {
9162                         dev_err(&hdev->pdev->dev,
9163                                 "mdio config fail ret=%d\n", ret);
9164                         goto err_msi_irq_uninit;
9165                 }
9166         }
9167
9168         ret = hclge_init_umv_space(hdev);
9169         if (ret) {
9170                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9171                 goto err_mdiobus_unreg;
9172         }
9173
9174         ret = hclge_mac_init(hdev);
9175         if (ret) {
9176                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9177                 goto err_mdiobus_unreg;
9178         }
9179
9180         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9181         if (ret) {
9182                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9183                 goto err_mdiobus_unreg;
9184         }
9185
9186         ret = hclge_config_gro(hdev, true);
9187         if (ret)
9188                 goto err_mdiobus_unreg;
9189
9190         ret = hclge_init_vlan_config(hdev);
9191         if (ret) {
9192                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9193                 goto err_mdiobus_unreg;
9194         }
9195
9196         ret = hclge_tm_schd_init(hdev);
9197         if (ret) {
9198                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9199                 goto err_mdiobus_unreg;
9200         }
9201
9202         hclge_rss_init_cfg(hdev);
9203         ret = hclge_rss_init_hw(hdev);
9204         if (ret) {
9205                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9206                 goto err_mdiobus_unreg;
9207         }
9208
9209         ret = init_mgr_tbl(hdev);
9210         if (ret) {
9211                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9212                 goto err_mdiobus_unreg;
9213         }
9214
9215         ret = hclge_init_fd_config(hdev);
9216         if (ret) {
9217                 dev_err(&pdev->dev,
9218                         "fd table init fail, ret=%d\n", ret);
9219                 goto err_mdiobus_unreg;
9220         }
9221
9222         INIT_KFIFO(hdev->mac_tnl_log);
9223
9224         hclge_dcb_ops_set(hdev);
9225
9226         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9227         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9228         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9229         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9230
9231         /* Set up affinity after the service timer setup, because
9232          * add_timer_on() is called from the affinity notify callback.
9233          */
9234         hclge_misc_affinity_setup(hdev);
9235
9236         hclge_clear_all_event_cause(hdev);
9237         hclge_clear_resetting_state(hdev);
9238
9239         /* Log and clear the hw errors that have already occurred */
9240         hclge_handle_all_hns_hw_errors(ae_dev);
9241
9242         /* Request a delayed reset for error recovery: an immediate global
9243          * reset on one PF would disturb other PFs' pending initialization.
9244          */
9245         if (ae_dev->hw_err_reset_req) {
9246                 enum hnae3_reset_type reset_level;
9247
9248                 reset_level = hclge_get_reset_level(ae_dev,
9249                                                     &ae_dev->hw_err_reset_req);
9250                 hclge_set_def_reset_request(ae_dev, reset_level);
9251                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9252         }
9253
9254         /* Enable MISC vector(vector0) */
9255         hclge_enable_vector(&hdev->misc_vector, true);
9256
9257         hclge_state_init(hdev);
9258         hdev->last_reset_time = jiffies;
9259
9260         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9261                  HCLGE_DRIVER_NAME);
9262
9263         return 0;
9264
9265 err_mdiobus_unreg:
9266         if (hdev->hw.mac.phydev)
9267                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9268 err_msi_irq_uninit:
9269         hclge_misc_irq_uninit(hdev);
9270 err_msi_uninit:
9271         pci_free_irq_vectors(pdev);
9272 err_cmd_uninit:
9273         hclge_cmd_uninit(hdev);
9274 err_pci_uninit:
9275         pcim_iounmap(pdev, hdev->hw.io_base);
9276         pci_clear_master(pdev);
9277         pci_release_regions(pdev);
9278         pci_disable_device(pdev);
9279 out:
9280         return ret;
9281 }
9282
9283 static void hclge_stats_clear(struct hclge_dev *hdev)
9284 {
9285         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9286 }
9287
9288 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9289 {
9290         struct hclge_vport *vport = hdev->vport;
9291         int i;
9292
9293         for (i = 0; i < hdev->num_alloc_vport; i++) {
9294                 hclge_vport_stop(vport);
9295                 vport++;
9296         }
9297 }
9298
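/* Re-initialization after a reset: rerun a subset of the probe steps
 * (command init, TQP mapping, MAC/VLAN/TM/RSS/FD) on the already
 * allocated structures, clear stale stats and VLAN tables, and re-enable
 * the hw error interrupts that a global reset disables.
 */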
9299 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9300 {
9301         struct hclge_dev *hdev = ae_dev->priv;
9302         struct pci_dev *pdev = ae_dev->pdev;
9303         int ret;
9304
9305         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9306
9307         hclge_stats_clear(hdev);
9308         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9309         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9310
9311         ret = hclge_cmd_init(hdev);
9312         if (ret) {
9313                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9314                 return ret;
9315         }
9316
9317         ret = hclge_map_tqp(hdev);
9318         if (ret) {
9319                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9320                 return ret;
9321         }
9322
9323         hclge_reset_umv_space(hdev);
9324
9325         ret = hclge_mac_init(hdev);
9326         if (ret) {
9327                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9328                 return ret;
9329         }
9330
9331         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9332         if (ret) {
9333                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9334                 return ret;
9335         }
9336
9337         ret = hclge_config_gro(hdev, true);
9338         if (ret)
9339                 return ret;
9340
9341         ret = hclge_init_vlan_config(hdev);
9342         if (ret) {
9343                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9344                 return ret;
9345         }
9346
9347         ret = hclge_tm_init_hw(hdev, true);
9348         if (ret) {
9349                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9350                 return ret;
9351         }
9352
9353         ret = hclge_rss_init_hw(hdev);
9354         if (ret) {
9355                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9356                 return ret;
9357         }
9358
9359         ret = hclge_init_fd_config(hdev);
9360         if (ret) {
9361                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9362                 return ret;
9363         }
9364
9365         /* Re-enable the hw error interrupts because
9366          * the interrupts get disabled on global reset.
9367          */
9368         ret = hclge_config_nic_hw_error(hdev, true);
9369         if (ret) {
9370                 dev_err(&pdev->dev,
9371                         "fail(%d) to re-enable NIC hw error interrupts\n",
9372                         ret);
9373                 return ret;
9374         }
9375
9376         if (hdev->roce_client) {
9377                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9378                 if (ret) {
9379                         dev_err(&pdev->dev,
9380                                 "fail(%d) to re-enable roce ras interrupts\n",
9381                                 ret);
9382                         return ret;
9383                 }
9384         }
9385
9386         hclge_reset_vport_state(hdev);
9387
9388         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9389                  HCLGE_DRIVER_NAME);
9390
9391         return 0;
9392 }
9393
9394 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9395 {
9396         struct hclge_dev *hdev = ae_dev->priv;
9397         struct hclge_mac *mac = &hdev->hw.mac;
9398
9399         hclge_misc_affinity_teardown(hdev);
9400         hclge_state_uninit(hdev);
9401
9402         if (mac->phydev)
9403                 mdiobus_unregister(mac->mdio_bus);
9404
9405         hclge_uninit_umv_space(hdev);
9406
9407         /* Disable MISC vector(vector0) */
9408         hclge_enable_vector(&hdev->misc_vector, false);
9409         synchronize_irq(hdev->misc_vector.vector_irq);
9410
9411         /* Disable all hw interrupts */
9412         hclge_config_mac_tnl_int(hdev, false);
9413         hclge_config_nic_hw_error(hdev, false);
9414         hclge_config_rocee_ras_interrupt(hdev, false);
9415
9416         hclge_cmd_uninit(hdev);
9417         hclge_misc_irq_uninit(hdev);
9418         hclge_pci_uninit(hdev);
9419         mutex_destroy(&hdev->vport_lock);
9420         hclge_uninit_vport_mac_table(hdev);
9421         hclge_uninit_vport_vlan_table(hdev);
9422         mutex_destroy(&hdev->vport_cfg_mutex);
9423         ae_dev->priv = NULL;
9424 }
9425
9426 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9427 {
9428         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9429         struct hclge_vport *vport = hclge_get_vport(handle);
9430         struct hclge_dev *hdev = vport->back;
9431
9432         return min_t(u32, hdev->rss_size_max,
9433                      vport->alloc_tqps / kinfo->num_tc);
9434 }
9435
9436 static void hclge_get_channels(struct hnae3_handle *handle,
9437                                struct ethtool_channels *ch)
9438 {
9439         ch->max_combined = hclge_get_max_channels(handle);
9440         ch->other_count = 1;
9441         ch->max_other = 1;
9442         ch->combined_count = handle->kinfo.rss_size;
9443 }
9444
9445 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9446                                         u16 *alloc_tqps, u16 *max_rss_size)
9447 {
9448         struct hclge_vport *vport = hclge_get_vport(handle);
9449         struct hclge_dev *hdev = vport->back;
9450
9451         *alloc_tqps = vport->alloc_tqps;
9452         *max_rss_size = hdev->rss_size_max;
9453 }
9454
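/* ethtool -L handler: record the requested queue count, let the TM code
 * remap the vport's resources, reprogram the RSS TC mode for the new
 * rss_size and, unless the user has installed a custom RSS indirection
 * table, rebuild the table as a plain round-robin over the new queues.
 */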
9455 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9456                               bool rxfh_configured)
9457 {
9458         struct hclge_vport *vport = hclge_get_vport(handle);
9459         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9460         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9461         struct hclge_dev *hdev = vport->back;
9462         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9463         int cur_rss_size = kinfo->rss_size;
9464         int cur_tqps = kinfo->num_tqps;
9465         u16 tc_valid[HCLGE_MAX_TC_NUM];
9466         u16 roundup_size;
9467         u32 *rss_indir;
9468         unsigned int i;
9469         int ret;
9470
9471         kinfo->req_rss_size = new_tqps_num;
9472
9473         ret = hclge_tm_vport_map_update(hdev);
9474         if (ret) {
9475                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9476                 return ret;
9477         }
9478
9479         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9480         roundup_size = ilog2(roundup_size);
9481         /* Set the RSS TC mode according to the new RSS size */
9482         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9483                 tc_valid[i] = 0;
9484
9485                 if (!(hdev->hw_tc_map & BIT(i)))
9486                         continue;
9487
9488                 tc_valid[i] = 1;
9489                 tc_size[i] = roundup_size;
9490                 tc_offset[i] = kinfo->rss_size * i;
9491         }
9492         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9493         if (ret)
9494                 return ret;
9495
9496         /* RSS indirection table has been configured by the user */
9497         if (rxfh_configured)
9498                 goto out;
9499
9500         /* Reinitialize the RSS indirection table for the new RSS size */
9501         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9502         if (!rss_indir)
9503                 return -ENOMEM;
9504
9505         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9506                 rss_indir[i] = i % kinfo->rss_size;
9507
9508         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9509         if (ret)
9510                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9511                         ret);
9512
9513         kfree(rss_indir);
9514
9515 out:
9516         if (!ret)
9517                 dev_info(&hdev->pdev->dev,
9518                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
9519                          cur_rss_size, kinfo->rss_size,
9520                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
9521
9522         return ret;
9523 }
9524
9525 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9526                               u32 *regs_num_64_bit)
9527 {
9528         struct hclge_desc desc;
9529         u32 total_num;
9530         int ret;
9531
9532         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9533         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9534         if (ret) {
9535                 dev_err(&hdev->pdev->dev,
9536                         "Query register number cmd failed, ret = %d.\n", ret);
9537                 return ret;
9538         }
9539
9540         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9541         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9542
9543         total_num = *regs_num_32_bit + *regs_num_64_bit;
9544         if (!total_num)
9545                 return -EINVAL;
9546
9547         return 0;
9548 }
9549
9550 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9551                                  void *data)
9552 {
9553 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9554 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9555
9556         struct hclge_desc *desc;
9557         u32 *reg_val = data;
9558         __le32 *desc_data;
9559         int nodata_num;
9560         int cmd_num;
9561         int i, k, n;
9562         int ret;
9563
9564         if (regs_num == 0)
9565                 return 0;
9566
9567         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9568         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9569                                HCLGE_32_BIT_REG_RTN_DATANUM);
9570         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9571         if (!desc)
9572                 return -ENOMEM;
9573
9574         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9575         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9576         if (ret) {
9577                 dev_err(&hdev->pdev->dev,
9578                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
9579                 kfree(desc);
9580                 return ret;
9581         }
9582
9583         for (i = 0; i < cmd_num; i++) {
9584                 if (i == 0) {
9585                         desc_data = (__le32 *)(&desc[i].data[0]);
9586                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9587                 } else {
9588                         desc_data = (__le32 *)(&desc[i]);
9589                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
9590                 }
9591                 for (k = 0; k < n; k++) {
9592                         *reg_val++ = le32_to_cpu(*desc_data++);
9593
9594                         regs_num--;
9595                         if (!regs_num)
9596                                 break;
9597                 }
9598         }
9599
9600         kfree(desc);
9601         return 0;
9602 }
9603
9604 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9605                                  void *data)
9606 {
9607 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9608 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9609
9610         struct hclge_desc *desc;
9611         u64 *reg_val = data;
9612         __le64 *desc_data;
9613         int nodata_len;
9614         int cmd_num;
9615         int i, k, n;
9616         int ret;
9617
9618         if (regs_num == 0)
9619                 return 0;
9620
9621         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9622         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9623                                HCLGE_64_BIT_REG_RTN_DATANUM);
9624         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9625         if (!desc)
9626                 return -ENOMEM;
9627
9628         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9629         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9630         if (ret) {
9631                 dev_err(&hdev->pdev->dev,
9632                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
9633                 kfree(desc);
9634                 return ret;
9635         }
9636
9637         for (i = 0; i < cmd_num; i++) {
9638                 if (i == 0) {
9639                         desc_data = (__le64 *)(&desc[i].data[0]);
9640                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9641                 } else {
9642                         desc_data = (__le64 *)(&desc[i]);
9643                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
9644                 }
9645                 for (k = 0; k < n; k++) {
9646                         *reg_val++ = le64_to_cpu(*desc_data++);
9647
9648                         regs_num--;
9649                         if (!regs_num)
9650                                 break;
9651                 }
9652         }
9653
9654         kfree(desc);
9655         return 0;
9656 }
9657
9658 #define MAX_SEPARATE_NUM        4
9659 #define SEPARATOR_VALUE         0xFDFCFBFA
9660 #define REG_NUM_PER_LINE        4
9661 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9662 #define REG_SEPARATOR_LINE      1
9663 #define REG_NUM_REMAIN_MASK     3
9664 #define BD_LIST_MAX_NUM         30
9665
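/* The register dump below is laid out in "lines" of REG_NUM_PER_LINE (4)
 * u32 values, i.e. 16 bytes per line; partial lines are padded with
 * SEPARATOR_VALUE (0xFDFCFBFA) so a consumer can spot section boundaries.
 * The DFX sections are sized at runtime from the BD counts queried here.
 */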
9666 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9667 {
9668         /* prepare 4 commands to query DFX BD number */
9669         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9670         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9671         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9672         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9673         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9674         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9675         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9676
9677         return hclge_cmd_send(&hdev->hw, desc, 4);
9678 }
9679
9680 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9681                                     int *bd_num_list,
9682                                     u32 type_num)
9683 {
9684 #define HCLGE_DFX_REG_BD_NUM    4
9685
9686         u32 entries_per_desc, desc_index, index, offset, i;
9687         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9688         int ret;
9689
9690         ret = hclge_query_bd_num_cmd_send(hdev, desc);
9691         if (ret) {
9692                 dev_err(&hdev->pdev->dev,
9693                         "Get dfx bd num fail, status is %d.\n", ret);
9694                 return ret;
9695         }
9696
9697         entries_per_desc = ARRAY_SIZE(desc[0].data);
9698         for (i = 0; i < type_num; i++) {
9699                 offset = hclge_dfx_bd_offset_list[i];
9700                 index = offset % entries_per_desc;
9701                 desc_index = offset / entries_per_desc;
9702                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9703         }
9704
9705         return ret;
9706 }
9707
9708 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9709                                   struct hclge_desc *desc_src, int bd_num,
9710                                   enum hclge_opcode_type cmd)
9711 {
9712         struct hclge_desc *desc = desc_src;
9713         int i, ret;
9714
9715         hclge_cmd_setup_basic_desc(desc, cmd, true);
9716         for (i = 0; i < bd_num - 1; i++) {
9717                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9718                 desc++;
9719                 hclge_cmd_setup_basic_desc(desc, cmd, true);
9720         }
9721
9722         desc = desc_src;
9723         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9724         if (ret)
9725                 dev_err(&hdev->pdev->dev,
9726                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9727                         cmd, ret);
9728
9729         return ret;
9730 }
9731
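/* Copy the queried DFX registers out of the descriptors and pad to the
 * line size.  reg_num & REG_NUM_REMAIN_MASK is reg_num mod 4, so e.g.
 * reg_num = 10 yields separator_num = 4 - 2 = 2 for a 12-entry section,
 * while a multiple-of-4 reg_num gets a full separator line of 4 entries.
 */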
9732 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9733                                     void *data)
9734 {
9735         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9736         struct hclge_desc *desc = desc_src;
9737         u32 *reg = data;
9738
9739         entries_per_desc = ARRAY_SIZE(desc->data);
9740         reg_num = entries_per_desc * bd_num;
9741         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9742         for (i = 0; i < reg_num; i++) {
9743                 index = i % entries_per_desc;
9744                 desc_index = i / entries_per_desc;
9745                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
9746         }
9747         for (i = 0; i < separator_num; i++)
9748                 *reg++ = SEPARATOR_VALUE;
9749
9750         return reg_num + separator_num;
9751 }
9752
9753 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9754 {
9755         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9756         int data_len_per_desc, data_len, bd_num, i;
9757         int bd_num_list[BD_LIST_MAX_NUM];
9758         int ret;
9759
9760         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9761         if (ret) {
9762                 dev_err(&hdev->pdev->dev,
9763                         "Get dfx reg bd num fail, status is %d.\n", ret);
9764                 return ret;
9765         }
9766
9767         data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9768         *len = 0;
9769         for (i = 0; i < dfx_reg_type_num; i++) {
9770                 bd_num = bd_num_list[i];
9771                 data_len = data_len_per_desc * bd_num;
9772                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9773         }
9774
9775         return ret;
9776 }
9777
9778 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9779 {
9780         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9781         int bd_num, bd_num_max, buf_len, i;
9782         int bd_num_list[BD_LIST_MAX_NUM];
9783         struct hclge_desc *desc_src;
9784         u32 *reg = data;
9785         int ret;
9786
9787         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9788         if (ret) {
9789                 dev_err(&hdev->pdev->dev,
9790                         "Get dfx reg bd num fail, status is %d.\n", ret);
9791                 return ret;
9792         }
9793
9794         bd_num_max = bd_num_list[0];
9795         for (i = 1; i < dfx_reg_type_num; i++)
9796                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9797
9798         buf_len = sizeof(*desc_src) * bd_num_max;
9799         desc_src = kzalloc(buf_len, GFP_KERNEL);
9800         if (!desc_src) {
9801                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9802                 return -ENOMEM;
9803         }
9804
9805         for (i = 0; i < dfx_reg_type_num; i++) {
9806                 bd_num = bd_num_list[i];
9807                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9808                                              hclge_dfx_reg_opcode_list[i]);
9809                 if (ret) {
9810                         dev_err(&hdev->pdev->dev,
9811                                 "Get dfx reg fail, status is %d.\n", ret);
9812                         break;
9813                 }
9814
9815                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9816         }
9817
9818         kfree(desc_src);
9819         return ret;
9820 }
9821
9822 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9823                               struct hnae3_knic_private_info *kinfo)
9824 {
9825 #define HCLGE_RING_REG_OFFSET           0x200
9826 #define HCLGE_RING_INT_REG_OFFSET       0x4
9827
9828         int i, j, reg_num, separator_num;
9829         int data_num_sum;
9830         u32 *reg = data;
9831
9832         /* fetch per-PF register values from the PF PCIe register space */
9833         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9834         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9835         for (i = 0; i < reg_num; i++)
9836                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9837         for (i = 0; i < separator_num; i++)
9838                 *reg++ = SEPARATOR_VALUE;
9839         data_num_sum = reg_num + separator_num;
9840
9841         reg_num = ARRAY_SIZE(common_reg_addr_list);
9842         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9843         for (i = 0; i < reg_num; i++)
9844                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9845         for (i = 0; i < separator_num; i++)
9846                 *reg++ = SEPARATOR_VALUE;
9847         data_num_sum += reg_num + separator_num;
9848
9849         reg_num = ARRAY_SIZE(ring_reg_addr_list);
9850         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9851         for (j = 0; j < kinfo->num_tqps; j++) {
9852                 for (i = 0; i < reg_num; i++)
9853                         *reg++ = hclge_read_dev(&hdev->hw,
9854                                                 ring_reg_addr_list[i] +
9855                                                 HCLGE_RING_REG_OFFSET * j);
9856                 for (i = 0; i < separator_num; i++)
9857                         *reg++ = SEPARATOR_VALUE;
9858         }
9859         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9860
9861         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9862         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9863         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9864                 for (i = 0; i < reg_num; i++)
9865                         *reg++ = hclge_read_dev(&hdev->hw,
9866                                                 tqp_intr_reg_addr_list[i] +
9867                                                 HCLGE_RING_INT_REG_OFFSET * j);
9868                 for (i = 0; i < separator_num; i++)
9869                         *reg++ = SEPARATOR_VALUE;
9870         }
9871         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9872
9873         return data_num_sum;
9874 }
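
/*
 * Editor's sketch (not part of the driver): every register group in the
 * PF dump is padded to a full line with SEPARATOR_VALUE words.  Assuming
 * the driver's MAX_SEPARATE_NUM of 4 and REG_NUM_REMAIN_MASK of 3 (four
 * u32 words per line), a group of 10 registers gets 4 - (10 & 3) = 2 pad
 * words, and a group that already fills its last line still gets a full
 * separator line of 4 words.
 */
static u32 *hclge_pad_reg_group(u32 *reg, int reg_num)
{
	int separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	int i;

	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg;
}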
9875
9876 static int hclge_get_regs_len(struct hnae3_handle *handle)
9877 {
9878         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9879         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9880         struct hclge_vport *vport = hclge_get_vport(handle);
9881         struct hclge_dev *hdev = vport->back;
9882         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9883         int regs_lines_32_bit, regs_lines_64_bit;
9884         int ret;
9885
9886         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9887         if (ret) {
9888                 dev_err(&hdev->pdev->dev,
9889                         "Get register number failed, ret = %d.\n", ret);
9890                 return ret;
9891         }
9892
9893         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9894         if (ret) {
9895                 dev_err(&hdev->pdev->dev,
9896                         "Get dfx reg len failed, ret = %d.\n", ret);
9897                 return ret;
9898         }
9899
9900         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9901                 REG_SEPARATOR_LINE;
9902         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9903                 REG_SEPARATOR_LINE;
9904         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9905                 REG_SEPARATOR_LINE;
9906         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9907                 REG_SEPARATOR_LINE;
9908         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9909                 REG_SEPARATOR_LINE;
9910         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9911                 REG_SEPARATOR_LINE;
9912
9913         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9914                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9915                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9916 }
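
/*
 * Worked example (editor's note, assuming REG_LEN_PER_LINE is 16 bytes,
 * i.e. four u32 words, and REG_SEPARATOR_LINE is 1): 100 32-bit
 * registers yield regs_lines_32_bit = 100 * 4 / 16 + 1 = 26 lines, and
 * 50 64-bit registers yield regs_lines_64_bit = 50 * 8 / 16 + 1 = 26
 * lines.  The line total is converted back to bytes with
 * REG_LEN_PER_LINE; dfx_regs_len is already in bytes, so it is added
 * last without scaling.
 */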
9917
9918 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9919                            void *data)
9920 {
9921         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9922         struct hclge_vport *vport = hclge_get_vport(handle);
9923         struct hclge_dev *hdev = vport->back;
9924         u32 regs_num_32_bit, regs_num_64_bit;
9925         int i, reg_num, separator_num, ret;
9926         u32 *reg = data;
9927
9928         *version = hdev->fw_version;
9929
9930         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9931         if (ret) {
9932                 dev_err(&hdev->pdev->dev,
9933                         "Get register number failed, ret = %d.\n", ret);
9934                 return;
9935         }
9936
9937         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
9938
9939         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9940         if (ret) {
9941                 dev_err(&hdev->pdev->dev,
9942                         "Get 32 bit register failed, ret = %d.\n", ret);
9943                 return;
9944         }
9945         reg_num = regs_num_32_bit;
9946         reg += reg_num;
9947         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9948         for (i = 0; i < separator_num; i++)
9949                 *reg++ = SEPARATOR_VALUE;
9950
9951         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9952         if (ret) {
9953                 dev_err(&hdev->pdev->dev,
9954                         "Get 64 bit register failed, ret = %d.\n", ret);
9955                 return;
9956         }
9957         reg_num = regs_num_64_bit * 2;
9958         reg += reg_num;
9959         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9960         for (i = 0; i < separator_num; i++)
9961                 *reg++ = SEPARATOR_VALUE;
9962
9963         ret = hclge_get_dfx_reg(hdev, reg);
9964         if (ret)
9965                 dev_err(&hdev->pdev->dev,
9966                         "Get dfx register failed, ret = %d.\n", ret);
9967 }
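
/*
 * Buffer layout produced above (editor's summary, not driver code):
 *
 *	per-PF groups (cmdq/common/ring/tqp-intr), each padded
 *	32-bit registers + separator padding
 *	64-bit registers (two u32 slots each) + separator padding
 *	DFX registers
 *
 * Note that reg_num is doubled for the 64-bit block before the padding
 * math, since each 64-bit register occupies two u32 slots.
 */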
9968
9969 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9970 {
9971         struct hclge_set_led_state_cmd *req;
9972         struct hclge_desc desc;
9973         int ret;
9974
9975         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9976
9977         req = (struct hclge_set_led_state_cmd *)desc.data;
9978         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9979                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9980
9981         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9982         if (ret)
9983                 dev_err(&hdev->pdev->dev,
9984                         "Failed to send set led state cmd, ret = %d\n", ret);
9985
9986         return ret;
9987 }
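
/*
 * Editor's sketch (not part of the driver) of the single-BD command
 * pattern used above: set up a descriptor for an opcode, fill its data
 * words, and post it on the command queue.  Opcode and value here are
 * hypothetical.
 */
static int hclge_send_one_bd_cmd(struct hclge_dev *hdev,
				 enum hclge_opcode_type opcode, u32 val)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);
	desc.data[0] = cpu_to_le32(val);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}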
9988
9989 enum hclge_led_status {
9990         HCLGE_LED_OFF,
9991         HCLGE_LED_ON,
9992         HCLGE_LED_NO_CHANGE = 0xFF,
9993 };
9994
9995 static int hclge_set_led_id(struct hnae3_handle *handle,
9996                             enum ethtool_phys_id_state status)
9997 {
9998         struct hclge_vport *vport = hclge_get_vport(handle);
9999         struct hclge_dev *hdev = vport->back;
10000
10001         switch (status) {
10002         case ETHTOOL_ID_ACTIVE:
10003                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10004         case ETHTOOL_ID_INACTIVE:
10005                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10006         default:
10007                 return -EINVAL;
10008         }
10009 }
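
/*
 * Editor's note: this implements the ethtool set_phys_id contract,
 * reached via "ethtool -p <dev>".  The core passes ETHTOOL_ID_ACTIVE
 * when identification starts and ETHTOOL_ID_INACTIVE when it ends;
 * returning 0 from the ACTIVE case (rather than a blink frequency)
 * tells the core the hardware blinks the LED itself, so no
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks are needed.
 *
 * Example usage from userspace:
 *
 *	# blink the port LED of eth0 for 5 seconds
 *	ethtool -p eth0 5
 */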
10010
10011 static void hclge_get_link_mode(struct hnae3_handle *handle,
10012                                 unsigned long *supported,
10013                                 unsigned long *advertising)
10014 {
10015         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10016         struct hclge_vport *vport = hclge_get_vport(handle);
10017         struct hclge_dev *hdev = vport->back;
10018         unsigned int idx;
10019
10020         for (idx = 0; idx < size; idx++) {
10021                 supported[idx] = hdev->hw.mac.supported[idx];
10022                 advertising[idx] = hdev->hw.mac.advertising[idx];
10023         }
10024 }
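
/*
 * Editor's sketch (not driver code): the per-word copy above is
 * equivalent to the kernel's linkmode helpers, which wrap bitmap_copy()
 * over __ETHTOOL_LINK_MODE_MASK_NBITS bits:
 */
static void hclge_get_link_mode_alt(struct hclge_dev *hdev,
				    unsigned long *supported,
				    unsigned long *advertising)
{
	linkmode_copy(supported, hdev->hw.mac.supported);
	linkmode_copy(advertising, hdev->hw.mac.advertising);
}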
10025
10026 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10027 {
10028         struct hclge_vport *vport = hclge_get_vport(handle);
10029         struct hclge_dev *hdev = vport->back;
10030
10031         return hclge_config_gro(hdev, enable);
10032 }
10033
10034 static const struct hnae3_ae_ops hclge_ops = {
10035         .init_ae_dev = hclge_init_ae_dev,
10036         .uninit_ae_dev = hclge_uninit_ae_dev,
10037         .flr_prepare = hclge_flr_prepare,
10038         .flr_done = hclge_flr_done,
10039         .init_client_instance = hclge_init_client_instance,
10040         .uninit_client_instance = hclge_uninit_client_instance,
10041         .map_ring_to_vector = hclge_map_ring_to_vector,
10042         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10043         .get_vector = hclge_get_vector,
10044         .put_vector = hclge_put_vector,
10045         .set_promisc_mode = hclge_set_promisc_mode,
10046         .set_loopback = hclge_set_loopback,
10047         .start = hclge_ae_start,
10048         .stop = hclge_ae_stop,
10049         .client_start = hclge_client_start,
10050         .client_stop = hclge_client_stop,
10051         .get_status = hclge_get_status,
10052         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10053         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10054         .get_media_type = hclge_get_media_type,
10055         .check_port_speed = hclge_check_port_speed,
10056         .get_fec = hclge_get_fec,
10057         .set_fec = hclge_set_fec,
10058         .get_rss_key_size = hclge_get_rss_key_size,
10059         .get_rss_indir_size = hclge_get_rss_indir_size,
10060         .get_rss = hclge_get_rss,
10061         .set_rss = hclge_set_rss,
10062         .set_rss_tuple = hclge_set_rss_tuple,
10063         .get_rss_tuple = hclge_get_rss_tuple,
10064         .get_tc_size = hclge_get_tc_size,
10065         .get_mac_addr = hclge_get_mac_addr,
10066         .set_mac_addr = hclge_set_mac_addr,
10067         .do_ioctl = hclge_do_ioctl,
10068         .add_uc_addr = hclge_add_uc_addr,
10069         .rm_uc_addr = hclge_rm_uc_addr,
10070         .add_mc_addr = hclge_add_mc_addr,
10071         .rm_mc_addr = hclge_rm_mc_addr,
10072         .set_autoneg = hclge_set_autoneg,
10073         .get_autoneg = hclge_get_autoneg,
10074         .restart_autoneg = hclge_restart_autoneg,
10075         .halt_autoneg = hclge_halt_autoneg,
10076         .get_pauseparam = hclge_get_pauseparam,
10077         .set_pauseparam = hclge_set_pauseparam,
10078         .set_mtu = hclge_set_mtu,
10079         .reset_queue = hclge_reset_tqp,
10080         .get_stats = hclge_get_stats,
10081         .get_mac_stats = hclge_get_mac_stat,
10082         .update_stats = hclge_update_stats,
10083         .get_strings = hclge_get_strings,
10084         .get_sset_count = hclge_get_sset_count,
10085         .get_fw_version = hclge_get_fw_version,
10086         .get_mdix_mode = hclge_get_mdix_mode,
10087         .enable_vlan_filter = hclge_enable_vlan_filter,
10088         .set_vlan_filter = hclge_set_vlan_filter,
10089         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10090         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10091         .reset_event = hclge_reset_event,
10092         .get_reset_level = hclge_get_reset_level,
10093         .set_default_reset_request = hclge_set_def_reset_request,
10094         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10095         .set_channels = hclge_set_channels,
10096         .get_channels = hclge_get_channels,
10097         .get_regs_len = hclge_get_regs_len,
10098         .get_regs = hclge_get_regs,
10099         .set_led_id = hclge_set_led_id,
10100         .get_link_mode = hclge_get_link_mode,
10101         .add_fd_entry = hclge_add_fd_entry,
10102         .del_fd_entry = hclge_del_fd_entry,
10103         .del_all_fd_entries = hclge_del_all_fd_entries,
10104         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10105         .get_fd_rule_info = hclge_get_fd_rule_info,
10106         .get_fd_all_rules = hclge_get_all_rules,
10107         .restore_fd_rules = hclge_restore_fd_entries,
10108         .enable_fd = hclge_enable_fd,
10109         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10110         .dbg_run_cmd = hclge_dbg_run_cmd,
10111         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10112         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10113         .ae_dev_resetting = hclge_ae_dev_resetting,
10114         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10115         .set_gro_en = hclge_gro_en,
10116         .get_global_queue_id = hclge_covert_handle_qid_global,
10117         .set_timer_task = hclge_set_timer_task,
10118         .mac_connect_phy = hclge_mac_connect_phy,
10119         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10120         .restore_vlan_table = hclge_restore_vlan_table,
10121 };
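
/*
 * Editor's sketch (not part of the driver): the hnae3 framework
 * decouples the hns3 netdev client from this PF backend through the ops
 * table above.  A client holding an hnae3_handle reaches the backend
 * roughly like this:
 */
static int example_client_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (!ops->set_mtu)
		return -EOPNOTSUPP;

	return ops->set_mtu(handle, new_mtu);
}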
10122
10123 static struct hnae3_ae_algo ae_algo = {
10124         .ops = &hclge_ops,
10125         .pdev_id_table = ae_algo_pci_tbl,
10126 };
10127
10128 static int hclge_init(void)
10129 {
10130         pr_info("%s is initializing\n", HCLGE_NAME);
10131
10132         hnae3_register_ae_algo(&ae_algo);
10133
10134         return 0;
10135 }
10136
10137 static void hclge_exit(void)
10138 {
10139         hnae3_unregister_ae_algo(&ae_algo);
10140 }
10141 module_init(hclge_init);
10142 module_exit(hclge_exit);
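
/*
 * Editor's note: hnae3_register_ae_algo() matches this algo against the
 * PCI IDs in ae_algo_pci_tbl for every ae_dev already registered by the
 * hns3 PCI driver, so devices probed before this module loads are
 * initialized here; hnae3_unregister_ae_algo() detaches them again on
 * module unload.
 */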
10143
10144 MODULE_LICENSE("GPL");
10145 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10146 MODULE_DESCRIPTION("HCLGE Driver");
10147 MODULE_VERSION(HCLGE_MOD_VERSION);