drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
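/* MAC manager table: a single entry that matches LLDP frames by ethertype
 * and destination MAC 01:80:c2:00:00:0e (split into hi32/lo16 below).
 */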
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
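/* Default RSS hash key: the well-known Toeplitz key used by Microsoft RSS */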
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
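/* Read the MAC statistics with the fixed-size (21 descriptor) command used by
 * older firmware and accumulate them into hdev->hw_stats.mac_stats.
 */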
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0032, only the first desc has the head */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
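/* Read all MAC statistics using @desc_num descriptors, for firmware that
 * supports the newer HCLGE_OPC_STATS_MAC_ALL query.
 */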
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0034, only the first desc has the head */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
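/* Ask firmware how many MAC statistics registers it exposes and convert that
 * count into the number of command descriptors needed: one head descriptor
 * plus ceil((reg_num - 3) / 4) additional ones.
 */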
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
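/* Query the per-queue RX and TX packet counters from firmware and accumulate
 * them into each TQP's software statistics.
 */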
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
505         return kinfo->num_tqps * 2;
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
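/* Copy the counters described by @strs (name plus byte offset into
 * @comm_stats) into @data and return the position after the last one.
 */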
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: only GE mode is supported
624          * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
625          * phy: only supported when a phy device exists on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear loopback bit flags at first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = (u8 *)data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Record whether this pf is the main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* PF should have both NIC vectors and RoCE vectors;
803                  * NIC vectors are queued before RoCE vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
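/* Update the advertised FEC link modes and fec_ability for the current MAC
 * speed: BASE-R for 10G/40G, RS for 25G/50G/100G (25G/50G also retain BASE-R
 * ability).
 */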
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to supporting all speeds for a GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }

1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
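        /* splice the high 16 bits above the low 32 bits to form the 48-bit
         * MAC address (the double shift is equivalent to << 32)
         */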
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query the static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be filled in
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* Length must be in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* minimum number of queue pairs equals the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Non-contiguous TCs are currently not supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
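/* Program the minimum and maximum MSS the hardware may use when performing
 * TSO segmentation.
 */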
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
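     /* hclge_alloc_tqps: allocate the per-device TQP array and initialize
      * each queue pair with its index, buffer size, descriptor counts and
      * register base inside the PF's I/O space.
      */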
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
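     /* hclge_map_tqps_to_func: bind the physical queue pair tqp_pid to a
      * function (PF when is_pf, otherwise VF) as its virtual queue tqp_vid
      * using the HCLGE_OPC_SET_TQP_MAP command.
      */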
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
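     /* hclge_assign_tqp: take up to num_tqps unused queue pairs from the
      * device pool, attach them to the vport's nic handle and derive the
      * vport's RSS size from the allocation and the number of TCs.
      */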
1370 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         /* this would be initialized later */
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1480                 ret = hclge_knic_setup(vport, num_tqps,
1481                                        hdev->num_tx_desc, hdev->num_rx_desc);
1482
1483                 if (ret) {
1484                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485                                 ret);
1486                         return ret;
1487                 }
1488         } else {
1489                 hclge_unic_setup(vport, num_tqps);
1490         }
1491
1492         return 0;
1493 }
1494
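     /* hclge_alloc_vport: allocate one vport per VMDq instance, one per
      * requested VF and one for the PF itself, splitting the available
      * TQPs evenly and giving the remainder to the main (PF) vport.
      */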
1495 static int hclge_alloc_vport(struct hclge_dev *hdev)
1496 {
1497         struct pci_dev *pdev = hdev->pdev;
1498         struct hclge_vport *vport;
1499         u32 tqp_main_vport;
1500         u32 tqp_per_vport;
1501         int num_vport, i;
1502         int ret;
1503
1504         /* We need to alloc a vport for the main NIC of the PF */
1505         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1506
1507         if (hdev->num_tqps < num_vport) {
1508                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1509                         hdev->num_tqps, num_vport);
1510                 return -EINVAL;
1511         }
1512
1513         /* Alloc the same number of TQPs for every vport */
1514         tqp_per_vport = hdev->num_tqps / num_vport;
1515         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1516
1517         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518                              GFP_KERNEL);
1519         if (!vport)
1520                 return -ENOMEM;
1521
1522         hdev->vport = vport;
1523         hdev->num_alloc_vport = num_vport;
1524
1525         if (IS_ENABLED(CONFIG_PCI_IOV))
1526                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1527
1528         for (i = 0; i < num_vport; i++) {
1529                 vport->back = hdev;
1530                 vport->vport_id = i;
1531                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1532                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1533                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1534                 INIT_LIST_HEAD(&vport->vlan_list);
1535                 INIT_LIST_HEAD(&vport->uc_mac_list);
1536                 INIT_LIST_HEAD(&vport->mc_mac_list);
1537
1538                 if (i == 0)
1539                         ret = hclge_vport_setup(vport, tqp_main_vport);
1540                 else
1541                         ret = hclge_vport_setup(vport, tqp_per_vport);
1542                 if (ret) {
1543                         dev_err(&pdev->dev,
1544                                 "vport setup failed for vport %d, %d\n",
1545                                 i, ret);
1546                         return ret;
1547                 }
1548
1549                 vport++;
1550         }
1551
1552         return 0;
1553 }
1554
1555 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1556                                     struct hclge_pkt_buf_alloc *buf_alloc)
1557 {
1558 /* TX buffer size is in units of 128 bytes */
1559 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1560 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1561         struct hclge_tx_buff_alloc_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564         u8 i;
1565
1566         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1569         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1571
1572                 req->tx_pkt_buff[i] =
1573                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1574                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1575         }
1576
1577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1578         if (ret)
1579                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1580                         ret);
1581
1582         return ret;
1583 }
1584
1585 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1586                                  struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1589
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592
1593         return ret;
1594 }
1595
1596 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1597 {
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1601                 if (hdev->hw_tc_map & BIT(i))
1602                         cnt++;
1603         return cnt;
1604 }
1605
1606 /* Get the number of PFC-enabled TCs that have a private buffer */
1607 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1608                                   struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of PFC-disabled TCs that have a private buffer */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         int i, cnt = 0;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (hdev->hw_tc_map & BIT(i) &&
1633                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634                     priv->enable)
1635                         cnt++;
1636         }
1637
1638         return cnt;
1639 }
1640
1641 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_priv_buf *priv;
1644         u32 rx_priv = 0;
1645         int i;
1646
1647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1648                 priv = &buf_alloc->priv_buf[i];
1649                 if (priv->enable)
1650                         rx_priv += priv->buf_size;
1651         }
1652         return rx_priv;
1653 }
1654
1655 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657         u32 i, total_tx_size = 0;
1658
1659         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1660                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1661
1662         return total_tx_size;
1663 }
1664
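     /* hclge_is_rx_buf_ok: check whether rx_all leaves enough room for the
      * shared buffer once the private buffers are taken out; if so, fill in
      * the shared buffer size, its waterlines and the per-TC thresholds.
      * The MPS is aligned up to HCLGE_BUF_SIZE_UNIT first, so e.g. an MPS
      * of 1500 bytes is treated as roundup(1500, 256) = 1536 bytes.
      */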
1665 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1666                                 struct hclge_pkt_buf_alloc *buf_alloc,
1667                                 u32 rx_all)
1668 {
1669         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1670         u32 tc_num = hclge_get_tc_num(hdev);
1671         u32 shared_buf, aligned_mps;
1672         u32 rx_priv;
1673         int i;
1674
1675         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1676
1677         if (hnae3_dev_dcb_supported(hdev))
1678                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1679         else
1680                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1681                                         + hdev->dv_buf_size;
1682
1683         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1684         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1685                              HCLGE_BUF_SIZE_UNIT);
1686
1687         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1688         if (rx_all < rx_priv + shared_std)
1689                 return false;
1690
1691         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1692         buf_alloc->s_buf.buf_size = shared_buf;
1693         if (hnae3_dev_dcb_supported(hdev)) {
1694                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1695                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1696                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1697         } else {
1698                 buf_alloc->s_buf.self.high = aligned_mps +
1699                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1700                 buf_alloc->s_buf.self.low = aligned_mps;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 if (tc_num)
1705                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1706                 else
1707                         hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / 2;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
1724
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for all enabled tc */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
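     /* hclge_rx_buf_calc_all: assign an RX private buffer to every enabled
      * TC (with larger waterlines when @max is true) and check whether the
      * result still fits into the remaining packet buffer.
      */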
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : 256;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1778                 }
1779
1780                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1781         }
1782
1783         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1784 }
1785
1786 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1787                                           struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1790         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1791         int i;
1792
1793         /* clear the private buffers starting from the last TC */
1794         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i) &&
1798                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1799                         /* Clear the no pfc TC private buffer */
1800                         priv->wl.low = 0;
1801                         priv->wl.high = 0;
1802                         priv->buf_size = 0;
1803                         priv->enable = 0;
1804                         no_pfc_priv_num--;
1805                 }
1806
1807                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1808                     no_pfc_priv_num == 0)
1809                         break;
1810         }
1811
1812         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1813 }
1814
1815 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1816                                         struct hclge_pkt_buf_alloc *buf_alloc)
1817 {
1818         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1819         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1820         int i;
1821
1822         /* clear the private buffers starting from the last TC */
1823         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1824                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1825
1826                 if (hdev->hw_tc_map & BIT(i) &&
1827                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1828                         /* Reduce the number of pfc TC with private buffer */
1829                         priv->wl.low = 0;
1830                         priv->enable = 0;
1831                         priv->wl.high = 0;
1832                         priv->buf_size = 0;
1833                         pfc_priv_num--;
1834                 }
1835
1836                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837                     pfc_priv_num == 0)
1838                         break;
1839         }
1840
1841         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1842 }
1843
1844 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1845  * @hdev: pointer to struct hclge_dev
1846  * @buf_alloc: pointer to buffer calculation data
1847  * @return: 0: calculation successful, negative: fail
1848  */
1849 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1850                                 struct hclge_pkt_buf_alloc *buf_alloc)
1851 {
1852         /* When DCB is not supported, rx private buffer is not allocated. */
1853         if (!hnae3_dev_dcb_supported(hdev)) {
1854                 u32 rx_all = hdev->pkt_buf_size;
1855
1856                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1857                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858                         return -ENOMEM;
1859
1860                 return 0;
1861         }
1862
1863         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1864                 return 0;
1865
1866         /* try to decrease the buffer size */
1867         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1868                 return 0;
1869
1870         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1871                 return 0;
1872
1873         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1874                 return 0;
1875
1876         return -ENOMEM;
1877 }
1878
1879 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1880                                    struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         struct hclge_rx_priv_buff_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         int i;
1886
1887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1888         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1889
1890         /* Alloc private buffer TCs */
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1893
1894                 req->buf_num[i] =
1895                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1896                 req->buf_num[i] |=
1897                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1898         }
1899
1900         req->shared_buf =
1901                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1902                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1903
1904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1905         if (ret)
1906                 dev_err(&hdev->pdev->dev,
1907                         "rx private buffer alloc cmd failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
1912 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1913                                    struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915         struct hclge_rx_priv_wl_buf *req;
1916         struct hclge_priv_buf *priv;
1917         struct hclge_desc desc[2];
1918         int i, j;
1919         int ret;
1920
1921         for (i = 0; i < 2; i++) {
1922                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1923                                            false);
1924                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1925
1926                 /* The first descriptor sets the NEXT bit to 1 */
1927                 if (i == 0)
1928                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1929                 else
1930                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1931
1932                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1933                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1934
1935                         priv = &buf_alloc->priv_buf[idx];
1936                         req->tc_wl[j].high =
1937                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].high |=
1939                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                         req->tc_wl[j].low =
1941                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1942                         req->tc_wl[j].low |=
1943                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1944                 }
1945         }
1946
1947         /* Send 2 descriptors at one time */
1948         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1949         if (ret)
1950                 dev_err(&hdev->pdev->dev,
1951                         "rx private waterline config cmd failed %d\n",
1952                         ret);
1953         return ret;
1954 }
1955
1956 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1957                                     struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1960         struct hclge_rx_com_thrd *req;
1961         struct hclge_desc desc[2];
1962         struct hclge_tc_thrd *tc;
1963         int i, j;
1964         int ret;
1965
1966         for (i = 0; i < 2; i++) {
1967                 hclge_cmd_setup_basic_desc(&desc[i],
1968                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1969                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1970
1971                 /* The first descriptor sets the NEXT bit to 1 */
1972                 if (i == 0)
1973                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1974                 else
1975                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1976
1977                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1978                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1979
1980                         req->com_thrd[j].high =
1981                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].high |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                         req->com_thrd[j].low =
1985                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1986                         req->com_thrd[j].low |=
1987                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1988                 }
1989         }
1990
1991         /* Send 2 descriptors at one time */
1992         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1993         if (ret)
1994                 dev_err(&hdev->pdev->dev,
1995                         "common threshold config cmd failed %d\n", ret);
1996         return ret;
1997 }
1998
1999 static int hclge_common_wl_config(struct hclge_dev *hdev,
2000                                   struct hclge_pkt_buf_alloc *buf_alloc)
2001 {
2002         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2003         struct hclge_rx_com_wl *req;
2004         struct hclge_desc desc;
2005         int ret;
2006
2007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2008
2009         req = (struct hclge_rx_com_wl *)desc.data;
2010         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2011         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012
2013         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2014         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2015
2016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2017         if (ret)
2018                 dev_err(&hdev->pdev->dev,
2019                         "common waterline config cmd failed %d\n", ret);
2020
2021         return ret;
2022 }
2023
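     /* hclge_buffer_alloc: top-level packet buffer setup. Calculate and
      * program the TX buffers and the per-TC RX private buffers, then, on
      * DCB capable hardware, the private waterlines and common thresholds,
      * and finally the common (shared) waterline.
      */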
2024 int hclge_buffer_alloc(struct hclge_dev *hdev)
2025 {
2026         struct hclge_pkt_buf_alloc *pkt_buf;
2027         int ret;
2028
2029         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2030         if (!pkt_buf)
2031                 return -ENOMEM;
2032
2033         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc tx buffer size for all TCs %d\n", ret);
2037                 goto out;
2038         }
2039
2040         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2041         if (ret) {
2042                 dev_err(&hdev->pdev->dev,
2043                         "could not alloc tx buffers %d\n", ret);
2044                 goto out;
2045         }
2046
2047         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2048         if (ret) {
2049                 dev_err(&hdev->pdev->dev,
2050                         "could not calc rx priv buffer size for all TCs %d\n",
2051                         ret);
2052                 goto out;
2053         }
2054
2055         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058                         ret);
2059                 goto out;
2060         }
2061
2062         if (hnae3_dev_dcb_supported(hdev)) {
2063                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2064                 if (ret) {
2065                         dev_err(&hdev->pdev->dev,
2066                                 "could not configure rx private waterline %d\n",
2067                                 ret);
2068                         goto out;
2069                 }
2070
2071                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2072                 if (ret) {
2073                         dev_err(&hdev->pdev->dev,
2074                                 "could not configure common threshold %d\n",
2075                                 ret);
2076                         goto out;
2077                 }
2078         }
2079
2080         ret = hclge_common_wl_config(hdev, pkt_buf);
2081         if (ret)
2082                 dev_err(&hdev->pdev->dev,
2083                         "could not configure common waterline %d\n", ret);
2084
2085 out:
2086         kfree(pkt_buf);
2087         return ret;
2088 }
2089
2090 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2091 {
2092         struct hnae3_handle *roce = &vport->roce;
2093         struct hnae3_handle *nic = &vport->nic;
2094
2095         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2096
2097         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098             vport->back->num_msi_left == 0)
2099                 return -EINVAL;
2100
2101         roce->rinfo.base_vector = vport->back->roce_base_vector;
2102
2103         roce->rinfo.netdev = nic->kinfo.netdev;
2104         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2105
2106         roce->pdev = nic->pdev;
2107         roce->ae_algo = nic->ae_algo;
2108         roce->numa_node_mask = nic->numa_node_mask;
2109
2110         return 0;
2111 }
2112
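     /* hclge_init_msi: allocate MSI/MSI-X vectors for the PF and set up the
      * vector_status/vector_irq bookkeeping arrays. The RoCE base vector
      * starts roce_base_msix_offset vectors above the PF's base vector.
      */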
2113 static int hclge_init_msi(struct hclge_dev *hdev)
2114 {
2115         struct pci_dev *pdev = hdev->pdev;
2116         int vectors;
2117         int i;
2118
2119         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2120                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2121         if (vectors < 0) {
2122                 dev_err(&pdev->dev,
2123                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2124                         vectors);
2125                 return vectors;
2126         }
2127         if (vectors < hdev->num_msi)
2128                 dev_warn(&hdev->pdev->dev,
2129                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2130                          hdev->num_msi, vectors);
2131
2132         hdev->num_msi = vectors;
2133         hdev->num_msi_left = vectors;
2134         hdev->base_msi_vector = pdev->irq;
2135         hdev->roce_base_vector = hdev->base_msi_vector +
2136                                 hdev->roce_base_msix_offset;
2137
2138         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2139                                            sizeof(u16), GFP_KERNEL);
2140         if (!hdev->vector_status) {
2141                 pci_free_irq_vectors(pdev);
2142                 return -ENOMEM;
2143         }
2144
2145         for (i = 0; i < hdev->num_msi; i++)
2146                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2147
2148         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2149                                         sizeof(int), GFP_KERNEL);
2150         if (!hdev->vector_irq) {
2151                 pci_free_irq_vectors(pdev);
2152                 return -ENOMEM;
2153         }
2154
2155         return 0;
2156 }
2157
2158 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2159 {
2160
2161         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2162                 duplex = HCLGE_MAC_FULL;
2163
2164         return duplex;
2165 }
2166
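     /* hclge_cfg_mac_speed_dup_hw: program MAC speed and duplex. The speed
      * field encoding used below is 1G=0, 10G=1, 25G=2, 40G=3, 50G=4,
      * 100G=5, 10M=6 and 100M=7.
      */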
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2168                                       u8 duplex)
2169 {
2170         struct hclge_config_mac_speed_dup_cmd *req;
2171         struct hclge_desc desc;
2172         int ret;
2173
2174         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2175
2176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2177
2178         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2179
2180         switch (speed) {
2181         case HCLGE_MAC_SPEED_10M:
2182                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183                                 HCLGE_CFG_SPEED_S, 6);
2184                 break;
2185         case HCLGE_MAC_SPEED_100M:
2186                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187                                 HCLGE_CFG_SPEED_S, 7);
2188                 break;
2189         case HCLGE_MAC_SPEED_1G:
2190                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191                                 HCLGE_CFG_SPEED_S, 0);
2192                 break;
2193         case HCLGE_MAC_SPEED_10G:
2194                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195                                 HCLGE_CFG_SPEED_S, 1);
2196                 break;
2197         case HCLGE_MAC_SPEED_25G:
2198                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199                                 HCLGE_CFG_SPEED_S, 2);
2200                 break;
2201         case HCLGE_MAC_SPEED_40G:
2202                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203                                 HCLGE_CFG_SPEED_S, 3);
2204                 break;
2205         case HCLGE_MAC_SPEED_50G:
2206                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207                                 HCLGE_CFG_SPEED_S, 4);
2208                 break;
2209         case HCLGE_MAC_SPEED_100G:
2210                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211                                 HCLGE_CFG_SPEED_S, 5);
2212                 break;
2213         default:
2214                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215                 return -EINVAL;
2216         }
2217
2218         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2219                       1);
2220
2221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222         if (ret) {
2223                 dev_err(&hdev->pdev->dev,
2224                         "mac speed/duplex config cmd failed %d.\n", ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 {
2233         int ret;
2234
2235         duplex = hclge_check_speed_dup(duplex, speed);
2236         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2237                 return 0;
2238
2239         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240         if (ret)
2241                 return ret;
2242
2243         hdev->hw.mac.speed = speed;
2244         hdev->hw.mac.duplex = duplex;
2245
2246         return 0;
2247 }
2248
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2250                                      u8 duplex)
2251 {
2252         struct hclge_vport *vport = hclge_get_vport(handle);
2253         struct hclge_dev *hdev = vport->back;
2254
2255         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2256 }
2257
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2259 {
2260         struct hclge_config_auto_neg_cmd *req;
2261         struct hclge_desc desc;
2262         u32 flag = 0;
2263         int ret;
2264
2265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2266
2267         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2270
2271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2272         if (ret)
2273                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274                         ret);
2275
2276         return ret;
2277 }
2278
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2280 {
2281         struct hclge_vport *vport = hclge_get_vport(handle);
2282         struct hclge_dev *hdev = vport->back;
2283
2284         if (!hdev->hw.mac.support_autoneg) {
2285                 if (enable) {
2286                         dev_err(&hdev->pdev->dev,
2287                                 "autoneg is not supported by current port\n");
2288                         return -EOPNOTSUPP;
2289                 } else {
2290                         return 0;
2291                 }
2292         }
2293
2294         return hclge_set_autoneg_en(hdev, enable);
2295 }
2296
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2298 {
2299         struct hclge_vport *vport = hclge_get_vport(handle);
2300         struct hclge_dev *hdev = vport->back;
2301         struct phy_device *phydev = hdev->hw.mac.phydev;
2302
2303         if (phydev)
2304                 return phydev->autoneg;
2305
2306         return hdev->hw.mac.autoneg;
2307 }
2308
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2310 {
2311         struct hclge_vport *vport = hclge_get_vport(handle);
2312         struct hclge_dev *hdev = vport->back;
2313         int ret;
2314
2315         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2316
2317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2318         if (ret)
2319                 return ret;
2320         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2321 }
2322
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2324 {
2325         struct hclge_config_fec_cmd *req;
2326         struct hclge_desc desc;
2327         int ret;
2328
2329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2330
2331         req = (struct hclge_config_fec_cmd *)desc.data;
2332         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334         if (fec_mode & BIT(HNAE3_FEC_RS))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337         if (fec_mode & BIT(HNAE3_FEC_BASER))
2338                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2340
2341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2342         if (ret)
2343                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344
2345         return ret;
2346 }
2347
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2349 {
2350         struct hclge_vport *vport = hclge_get_vport(handle);
2351         struct hclge_dev *hdev = vport->back;
2352         struct hclge_mac *mac = &hdev->hw.mac;
2353         int ret;
2354
2355         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357                 return -EINVAL;
2358         }
2359
2360         ret = hclge_set_fec_hw(hdev, fec_mode);
2361         if (ret)
2362                 return ret;
2363
2364         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365         return 0;
2366 }
2367
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2369                           u8 *fec_mode)
2370 {
2371         struct hclge_vport *vport = hclge_get_vport(handle);
2372         struct hclge_dev *hdev = vport->back;
2373         struct hclge_mac *mac = &hdev->hw.mac;
2374
2375         if (fec_ability)
2376                 *fec_ability = mac->fec_ability;
2377         if (fec_mode)
2378                 *fec_mode = mac->fec_mode;
2379 }
2380
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2382 {
2383         struct hclge_mac *mac = &hdev->hw.mac;
2384         int ret;
2385
2386         hdev->support_sfp_query = true;
2387         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389                                          hdev->hw.mac.duplex);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "Config mac speed dup fail ret=%d\n", ret);
2393                 return ret;
2394         }
2395
2396         mac->link = 0;
2397
2398         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2400                 if (ret) {
2401                         dev_err(&hdev->pdev->dev,
2402                                 "Fec mode init fail, ret = %d\n", ret);
2403                         return ret;
2404                 }
2405         }
2406
2407         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2408         if (ret) {
2409                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410                 return ret;
2411         }
2412
2413         ret = hclge_buffer_alloc(hdev);
2414         if (ret)
2415                 dev_err(&hdev->pdev->dev,
2416                         "allocate buffer fail, ret=%d\n", ret);
2417
2418         return ret;
2419 }
2420
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425                 schedule_work(&hdev->mbx_service_task);
2426 }
2427
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2429 {
2430         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431                 schedule_work(&hdev->rst_service_task);
2432 }
2433
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2435 {
2436         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439                 (void)schedule_work(&hdev->service_task);
2440 }
2441
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2443 {
2444         struct hclge_link_status_cmd *req;
2445         struct hclge_desc desc;
2446         int link_status;
2447         int ret;
2448
2449         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2451         if (ret) {
2452                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2453                         ret);
2454                 return ret;
2455         }
2456
2457         req = (struct hclge_link_status_cmd *)desc.data;
2458         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2459
2460         return !!link_status;
2461 }
2462
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2464 {
2465         int mac_state;
2466         int link_stat;
2467
2468         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2469                 return 0;
2470
2471         mac_state = hclge_get_mac_link_status(hdev);
2472
2473         if (hdev->hw.mac.phydev) {
2474                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475                         link_stat = mac_state &
2476                                 hdev->hw.mac.phydev->link;
2477                 else
2478                         link_stat = 0;
2479
2480         } else {
2481                 link_stat = mac_state;
2482         }
2483
2484         return !!link_stat;
2485 }
2486
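     /* hclge_update_link_status: poll the combined MAC/PHY link state and,
      * on a change, notify the NIC and RoCE clients of every vport and
      * reconfigure the MAC tunnel interrupt accordingly.
      */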
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2488 {
2489         struct hnae3_client *rclient = hdev->roce_client;
2490         struct hnae3_client *client = hdev->nic_client;
2491         struct hnae3_handle *rhandle;
2492         struct hnae3_handle *handle;
2493         int state;
2494         int i;
2495
2496         if (!client)
2497                 return;
2498         state = hclge_get_mac_phy_link(hdev);
2499         if (state != hdev->hw.mac.link) {
2500                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501                         handle = &hdev->vport[i].nic;
2502                         client->ops->link_status_change(handle, state);
2503                         hclge_config_mac_tnl_int(hdev, state);
2504                         rhandle = &hdev->vport[i].roce;
2505                         if (rclient && rclient->ops->link_status_change)
2506                                 rclient->ops->link_status_change(rhandle,
2507                                                                  state);
2508                 }
2509                 hdev->hw.mac.link = state;
2510         }
2511 }
2512
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2514 {
2515         /* update fec ability by speed */
2516         hclge_convert_setting_fec(mac);
2517
2518         /* firmware cannot identify the backplane type; the media type
2519          * read from the configuration helps to deal with it
2520          */
2521         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2526
2527         if (mac->support_autoneg) {
2528                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529                 linkmode_copy(mac->advertising, mac->supported);
2530         } else {
2531                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2532                                    mac->supported);
2533                 linkmode_zero(mac->advertising);
2534         }
2535 }
2536
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2538 {
2539         struct hclge_sfp_info_cmd *resp = NULL;
2540         struct hclge_desc desc;
2541         int ret;
2542
2543         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544         resp = (struct hclge_sfp_info_cmd *)desc.data;
2545         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546         if (ret == -EOPNOTSUPP) {
2547                 dev_warn(&hdev->pdev->dev,
2548                          "IMP does not support getting SFP speed %d\n", ret);
2549                 return ret;
2550         } else if (ret) {
2551                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2552                 return ret;
2553         }
2554
2555         *speed = le32_to_cpu(resp->speed);
2556
2557         return 0;
2558 }
2559
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2561 {
2562         struct hclge_sfp_info_cmd *resp;
2563         struct hclge_desc desc;
2564         int ret;
2565
2566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567         resp = (struct hclge_sfp_info_cmd *)desc.data;
2568
2569         resp->query_type = QUERY_ACTIVE_SPEED;
2570
2571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572         if (ret == -EOPNOTSUPP) {
2573                 dev_warn(&hdev->pdev->dev,
2574                          "IMP does not support getting SFP info %d\n", ret);
2575                 return ret;
2576         } else if (ret) {
2577                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2578                 return ret;
2579         }
2580
2581         mac->speed = le32_to_cpu(resp->speed);
2582         /* if resp->speed_ability is 0, it means the firmware is an old
2583          * version, so do not update these parameters
2584          */
2585         if (resp->speed_ability) {
2586                 mac->module_type = le32_to_cpu(resp->module_type);
2587                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588                 mac->autoneg = resp->autoneg;
2589                 mac->support_autoneg = resp->autoneg_ability;
2590                 if (!resp->active_fec)
2591                         mac->fec_mode = 0;
2592                 else
2593                         mac->fec_mode = BIT(resp->active_fec);
2594         } else {
2595                 mac->speed_type = QUERY_SFP_SPEED;
2596         }
2597
2598         return 0;
2599 }
2600
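     /* hclge_update_port_info: refresh speed/FEC/autoneg information from
      * the SFP via firmware. Revision 0x21 and later hardware uses the full
      * SFP info query, older hardware only queries the SFP speed.
      */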
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2602 {
2603         struct hclge_mac *mac = &hdev->hw.mac;
2604         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2605         int ret;
2606
2607         /* get the port info from SFP cmd if not copper port */
2608         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2609                 return 0;
2610
2611         /* if IMP does not support getting SFP/qSFP info, return directly */
2612         if (!hdev->support_sfp_query)
2613                 return 0;
2614
2615         if (hdev->pdev->revision >= 0x21)
2616                 ret = hclge_get_sfp_info(hdev, mac);
2617         else
2618                 ret = hclge_get_sfp_speed(hdev, &speed);
2619
2620         if (ret == -EOPNOTSUPP) {
2621                 hdev->support_sfp_query = false;
2622                 return ret;
2623         } else if (ret) {
2624                 return ret;
2625         }
2626
2627         if (hdev->pdev->revision >= 0x21) {
2628                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629                         hclge_update_port_capability(mac);
2630                         return 0;
2631                 }
2632                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2633                                                HCLGE_MAC_FULL);
2634         } else {
2635                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636                         return 0; /* do nothing if no SFP */
2637
2638                 /* must configure full duplex for SFP */
2639                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2640         }
2641 }
2642
2643 static int hclge_get_status(struct hnae3_handle *handle)
2644 {
2645         struct hclge_vport *vport = hclge_get_vport(handle);
2646         struct hclge_dev *hdev = vport->back;
2647
2648         hclge_update_link_status(hdev);
2649
2650         return hdev->hw.mac.link;
2651 }
2652
2653 static void hclge_service_timer(struct timer_list *t)
2654 {
2655         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2656
2657         mod_timer(&hdev->service_timer, jiffies + HZ);
2658         hdev->hw_stats.stats_timer++;
2659         hdev->fd_arfs_expire_timer++;
2660         hclge_task_schedule(hdev);
2661 }
2662
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2664 {
2665         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2666
2667         /* Flush memory before next watchdog */
2668         smp_mb__before_atomic();
2669         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2670 }
2671
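     /* hclge_check_event_cause: read the vector0 status registers and
      * classify the interrupt as a reset (IMP/global/core), MSI-X error,
      * mailbox or other event; *clearval returns the bits to acknowledge.
      */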
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2673 {
2674         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2675
2676         /* fetch the events from their corresponding regs */
2677         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679         msix_src_reg = hclge_read_dev(&hdev->hw,
2680                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2681
2682         /* Assumption: if reset and mailbox events are reported together,
2683          * only the reset event is processed in this pass and the mailbox
2684          * events are deferred. Since the RX CMDQ event has not been
2685          * cleared this time, the hardware will raise another interrupt
2686          * just for the mailbox.
2687          */
2688
2689         /* check for vector0 reset event sources */
2690         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695                 hdev->rst_stats.imp_rst_cnt++;
2696                 return HCLGE_VECTOR0_EVENT_RST;
2697         }
2698
2699         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704                 hdev->rst_stats.global_rst_cnt++;
2705                 return HCLGE_VECTOR0_EVENT_RST;
2706         }
2707
2708         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713                 hdev->rst_stats.core_rst_cnt++;
2714                 return HCLGE_VECTOR0_EVENT_RST;
2715         }
2716
2717         /* check for vector0 msix event source */
2718         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2720                         msix_src_reg);
2721                 return HCLGE_VECTOR0_EVENT_ERR;
2722         }
2723
2724         /* check for vector0 mailbox(=CMDQ RX) event source */
2725         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727                 *clearval = cmdq_src_reg;
2728                 return HCLGE_VECTOR0_EVENT_MBX;
2729         }
2730
2731         /* print other vector0 event source */
2732         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733                 cmdq_src_reg, msix_src_reg);
2734         return HCLGE_VECTOR0_EVENT_OTHER;
2735 }
2736
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2738                                     u32 regclr)
2739 {
2740         switch (event_type) {
2741         case HCLGE_VECTOR0_EVENT_RST:
2742                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2743                 break;
2744         case HCLGE_VECTOR0_EVENT_MBX:
2745                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2746                 break;
2747         default:
2748                 break;
2749         }
2750 }
2751
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2753 {
2754         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2759 }
2760
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2762 {
2763         writel(enable ? 1 : 0, vector->addr);
2764 }
2765
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2767 {
2768         struct hclge_dev *hdev = data;
2769         u32 event_cause;
2770         u32 clearval;
2771
2772         hclge_enable_vector(&hdev->misc_vector, false);
2773         event_cause = hclge_check_event_cause(hdev, &clearval);
2774
2775         /* vector 0 interrupt is shared with reset and mailbox source events. */
2776         switch (event_cause) {
2777         case HCLGE_VECTOR0_EVENT_ERR:
2778                 /* we do not know what type of reset is required now. This could
2779                  * only be decided after we fetch the type of errors which
2780                  * caused this event. Therefore, we will do the following for now:
2781                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782                  *    have a deferred type of reset to be used.
2783                  * 2. Schedule the reset service task.
2784                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
2785                  *    will fetch the correct type of reset. This would be done
2786                  *    by first decoding the types of errors.
2787                  */
2788                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2789                 /* fall through */
2790         case HCLGE_VECTOR0_EVENT_RST:
2791                 hclge_reset_task_schedule(hdev);
2792                 break;
2793         case HCLGE_VECTOR0_EVENT_MBX:
2794                 /* If we are here then either:
2795                  * 1. we are not handling any mbx task and no mbx task is
2796                  *    scheduled either,
2797                  *                        OR
2798                  * 2. we could be handling a mbx task but nothing more is
2799                  *    scheduled.
2800                  * In both cases, we should schedule the mbx task as there are
2801                  * more mbx messages reported by this interrupt.
2802                  */
2803                 hclge_mbx_task_schedule(hdev);
2804                 break;
2805         default:
2806                 dev_warn(&hdev->pdev->dev,
2807                          "received unknown or unhandled event of vector0\n");
2808                 break;
2809         }
2810
2811         /* clear the source of interrupt if it is not caused by reset */
2812         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813                 hclge_clear_event_cause(hdev, event_cause, clearval);
2814                 hclge_enable_vector(&hdev->misc_vector, true);
2815         }
2816
2817         return IRQ_HANDLED;
2818 }
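
/* Note on the handler above: mailbox events have their cause bit cleared and
 * the misc vector re-enabled right here, while reset events leave the cause
 * bit set and the vector masked; hclge_clear_reset_cause() later writes
 * HCLGE_MISC_RESET_STS_REG and re-enables the vector once the reset itself
 * has been handled.
 */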
2819
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2821 {
2822         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823                 dev_warn(&hdev->pdev->dev,
2824                          "vector(vector_id %d) has been freed.\n", vector_id);
2825                 return;
2826         }
2827
2828         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829         hdev->num_msi_left += 1;
2830         hdev->num_msi_used -= 1;
2831 }
2832
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2834 {
2835         struct hclge_misc_vector *vector = &hdev->misc_vector;
2836
2837         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2838
2839         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840         hdev->vector_status[0] = 0;
2841
2842         hdev->num_msi_left -= 1;
2843         hdev->num_msi_used += 1;
2844 }
2845
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2847 {
2848         int ret;
2849
2850         hclge_get_misc_vector(hdev);
2851
2852         /* this would be freed explicitly at the end */
2853         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854                           0, "hclge_misc", hdev);
2855         if (ret) {
2856                 hclge_free_vector(hdev, 0);
2857                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858                         hdev->misc_vector.vector_irq);
2859         }
2860
2861         return ret;
2862 }
2863
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2865 {
2866         free_irq(hdev->misc_vector.vector_irq, hdev);
2867         hclge_free_vector(hdev, 0);
2868 }
2869
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871                         enum hnae3_reset_notify_type type)
2872 {
2873         struct hnae3_client *client = hdev->nic_client;
2874         u16 i;
2875
2876         if (!client->ops->reset_notify)
2877                 return -EOPNOTSUPP;
2878
2879         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2880                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2881                 int ret;
2882
2883                 ret = client->ops->reset_notify(handle, type);
2884                 if (ret) {
2885                         dev_err(&hdev->pdev->dev,
2886                                 "notify nic client failed %d(%d)\n", type, ret);
2887                         return ret;
2888                 }
2889         }
2890
2891         return 0;
2892 }
2893
2894 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2895                                     enum hnae3_reset_notify_type type)
2896 {
2897         struct hnae3_client *client = hdev->roce_client;
2898         int ret = 0;
2899         u16 i;
2900
2901         if (!client)
2902                 return 0;
2903
2904         if (!client->ops->reset_notify)
2905                 return -EOPNOTSUPP;
2906
2907         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2908                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2909
2910                 ret = client->ops->reset_notify(handle, type);
2911                 if (ret) {
2912                         dev_err(&hdev->pdev->dev,
2913                                 "notify roce client failed %d(%d)\n",
2914                                 type, ret);
2915                         return ret;
2916                 }
2917         }
2918
2919         return ret;
2920 }
2921
2922 static int hclge_reset_wait(struct hclge_dev *hdev)
2923 {
2924 #define HCLGE_RESET_WAIT_MS     100
2925 #define HCLGE_RESET_WAIT_CNT    200
2926         u32 val, reg, reg_bit;
2927         u32 cnt = 0;
2928
2929         switch (hdev->reset_type) {
2930         case HNAE3_IMP_RESET:
2931                 reg = HCLGE_GLOBAL_RESET_REG;
2932                 reg_bit = HCLGE_IMP_RESET_BIT;
2933                 break;
2934         case HNAE3_GLOBAL_RESET:
2935                 reg = HCLGE_GLOBAL_RESET_REG;
2936                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2937                 break;
2938         case HNAE3_CORE_RESET:
2939                 reg = HCLGE_GLOBAL_RESET_REG;
2940                 reg_bit = HCLGE_CORE_RESET_BIT;
2941                 break;
2942         case HNAE3_FUNC_RESET:
2943                 reg = HCLGE_FUN_RST_ING;
2944                 reg_bit = HCLGE_FUN_RST_ING_B;
2945                 break;
2946         case HNAE3_FLR_RESET:
2947                 break;
2948         default:
2949                 dev_err(&hdev->pdev->dev,
2950                         "Wait for unsupported reset type: %d\n",
2951                         hdev->reset_type);
2952                 return -EINVAL;
2953         }
2954
2955         if (hdev->reset_type == HNAE3_FLR_RESET) {
2956                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2957                        cnt++ < HCLGE_RESET_WAIT_CNT)
2958                         msleep(HCLGE_RESET_WAIT_MS);
2959
2960                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2961                         dev_err(&hdev->pdev->dev,
2962                                 "flr wait timeout: %d\n", cnt);
2963                         return -EBUSY;
2964                 }
2965
2966                 return 0;
2967         }
2968
2969         val = hclge_read_dev(&hdev->hw, reg);
2970         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2971                 msleep(HCLGE_RESET_WAIT_MS);
2972                 val = hclge_read_dev(&hdev->hw, reg);
2973                 cnt++;
2974         }
2975
2976         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2977                 dev_warn(&hdev->pdev->dev,
2978                          "Wait for reset timeout: %d\n", hdev->reset_type);
2979                 return -EBUSY;
2980         }
2981
2982         return 0;
2983 }
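
/* Timing sketch for hclge_reset_wait() above: with a 100 ms poll interval and
 * a wait count of 200, both the FLR-done wait and the register polling loop
 * give the hardware roughly 200 * 100 ms = 20 seconds to clear the
 * reset-in-progress indication before returning -EBUSY.
 */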
2984
2985 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2986 {
2987         struct hclge_vf_rst_cmd *req;
2988         struct hclge_desc desc;
2989
2990         req = (struct hclge_vf_rst_cmd *)desc.data;
2991         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2992         req->dest_vfid = func_id;
2993
2994         if (reset)
2995                 req->vf_rst = 0x1;
2996
2997         return hclge_cmd_send(&hdev->hw, &desc, 1);
2998 }
2999
3000 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3001 {
3002         int i;
3003
3004         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3005                 struct hclge_vport *vport = &hdev->vport[i];
3006                 int ret;
3007
3008                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3009                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3010                 if (ret) {
3011                         dev_err(&hdev->pdev->dev,
3012                                 "set vf(%d) rst failed %d!\n",
3013                                 vport->vport_id, ret);
3014                         return ret;
3015                 }
3016
3017                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3018                         continue;
3019
3020                 /* Inform VF to process the reset.
3021                  * hclge_inform_reset_assert_to_vf may fail if VF
3022                  * driver is not loaded.
3023                  */
3024                 ret = hclge_inform_reset_assert_to_vf(vport);
3025                 if (ret)
3026                         dev_warn(&hdev->pdev->dev,
3027                                  "inform reset to vf(%d) failed %d!\n",
3028                                  vport->vport_id, ret);
3029         }
3030
3031         return 0;
3032 }
3033
3034 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3035 {
3036         struct hclge_desc desc;
3037         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3038         int ret;
3039
3040         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3041         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3042         req->fun_reset_vfid = func_id;
3043
3044         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3045         if (ret)
3046                 dev_err(&hdev->pdev->dev,
3047                         "send function reset cmd fail, status =%d\n", ret);
3048
3049         return ret;
3050 }
3051
3052 static void hclge_do_reset(struct hclge_dev *hdev)
3053 {
3054         struct hnae3_handle *handle = &hdev->vport[0].nic;
3055         struct pci_dev *pdev = hdev->pdev;
3056         u32 val;
3057
3058         if (hclge_get_hw_reset_stat(handle)) {
3059                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3060                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3061                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3062                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3063                 return;
3064         }
3065
3066         switch (hdev->reset_type) {
3067         case HNAE3_GLOBAL_RESET:
3068                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3069                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3070                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3071                 dev_info(&pdev->dev, "Global Reset requested\n");
3072                 break;
3073         case HNAE3_CORE_RESET:
3074                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3075                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3076                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3077                 dev_info(&pdev->dev, "Core Reset requested\n");
3078                 break;
3079         case HNAE3_FUNC_RESET:
3080                 dev_info(&pdev->dev, "PF Reset requested\n");
3081                 /* schedule again to check later */
3082                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3083                 hclge_reset_task_schedule(hdev);
3084                 break;
3085         case HNAE3_FLR_RESET:
3086                 dev_info(&pdev->dev, "FLR requested\n");
3087                 /* schedule again to check later */
3088                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3089                 hclge_reset_task_schedule(hdev);
3090                 break;
3091         default:
3092                 dev_warn(&pdev->dev,
3093                          "Unsupported reset type: %d\n", hdev->reset_type);
3094                 break;
3095         }
3096 }
3097
3098 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3099                                                    unsigned long *addr)
3100 {
3101         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3102
3103         /* first, resolve any unknown reset type to the known type(s) */
3104         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3105                 /* we will intentionally ignore any errors from this function
3106                  * as we will end up in *some* reset request in any case
3107                  */
3108                 hclge_handle_hw_msix_error(hdev, addr);
3109                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3110                 /* We deferred the clearing of the error event which caused the
3111                  * interrupt since it was not possible to do that in
3112                  * interrupt context (and this is the reason we introduced the
3113                  * new UNKNOWN reset type). Now that the errors have been
3114                  * handled and cleared in hardware, we can safely enable
3115                  * interrupts. This is an exception to the norm.
3116                  */
3117                 hclge_enable_vector(&hdev->misc_vector, true);
3118         }
3119
3120         /* return the highest priority reset level amongst all */
3121         if (test_bit(HNAE3_IMP_RESET, addr)) {
3122                 rst_level = HNAE3_IMP_RESET;
3123                 clear_bit(HNAE3_IMP_RESET, addr);
3124                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3125                 clear_bit(HNAE3_CORE_RESET, addr);
3126                 clear_bit(HNAE3_FUNC_RESET, addr);
3127         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3128                 rst_level = HNAE3_GLOBAL_RESET;
3129                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3130                 clear_bit(HNAE3_CORE_RESET, addr);
3131                 clear_bit(HNAE3_FUNC_RESET, addr);
3132         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3133                 rst_level = HNAE3_CORE_RESET;
3134                 clear_bit(HNAE3_CORE_RESET, addr);
3135                 clear_bit(HNAE3_FUNC_RESET, addr);
3136         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3137                 rst_level = HNAE3_FUNC_RESET;
3138                 clear_bit(HNAE3_FUNC_RESET, addr);
3139         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3140                 rst_level = HNAE3_FLR_RESET;
3141                 clear_bit(HNAE3_FLR_RESET, addr);
3142         }
3143
3144         if (hdev->reset_type != HNAE3_NONE_RESET &&
3145             rst_level < hdev->reset_type)
3146                 return HNAE3_NONE_RESET;
3147
3148         return rst_level;
3149 }
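
/* Worked example for hclge_get_reset_level() above: if both
 * HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET are pending in *addr, the function
 * returns HNAE3_GLOBAL_RESET and clears both bits, so the lower-level
 * function reset is absorbed by the global one. If the resolved level is
 * lower than a reset already in progress (hdev->reset_type), HNAE3_NONE_RESET
 * is returned instead so the ongoing higher-level reset is not interrupted.
 */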
3150
3151 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3152 {
3153         u32 clearval = 0;
3154
3155         switch (hdev->reset_type) {
3156         case HNAE3_IMP_RESET:
3157                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3158                 break;
3159         case HNAE3_GLOBAL_RESET:
3160                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3161                 break;
3162         case HNAE3_CORE_RESET:
3163                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3164                 break;
3165         default:
3166                 break;
3167         }
3168
3169         if (!clearval)
3170                 return;
3171
3172         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3173         hclge_enable_vector(&hdev->misc_vector, true);
3174 }
3175
3176 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3177 {
3178         int ret = 0;
3179
3180         switch (hdev->reset_type) {
3181         case HNAE3_FUNC_RESET:
3182                 /* fall through */
3183         case HNAE3_FLR_RESET:
3184                 ret = hclge_set_all_vf_rst(hdev, true);
3185                 break;
3186         default:
3187                 break;
3188         }
3189
3190         return ret;
3191 }
3192
3193 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3194 {
3195         u32 reg_val;
3196         int ret = 0;
3197
3198         switch (hdev->reset_type) {
3199         case HNAE3_FUNC_RESET:
3200                 /* There is no mechanism for PF to know if VF has stopped IO.
3201                  * For now, just wait 100 ms for VF to stop IO.
3202                  */
3203                 msleep(100);
3204                 ret = hclge_func_reset_cmd(hdev, 0);
3205                 if (ret) {
3206                         dev_err(&hdev->pdev->dev,
3207                                 "asserting function reset fail %d!\n", ret);
3208                         return ret;
3209                 }
3210
3211                 /* After performing PF reset, it is not necessary to do the
3212                  * mailbox handling or send any command to firmware, because
3213                  * any mailbox handling or command to firmware is only valid
3214                  * after hclge_cmd_init is called.
3215                  */
3216                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3217                 hdev->rst_stats.pf_rst_cnt++;
3218                 break;
3219         case HNAE3_FLR_RESET:
3220                 /* There is no mechanism for PF to know if VF has stopped IO.
3221                  * For now, just wait 100 ms for VF to stop IO.
3222                  */
3223                 msleep(100);
3224                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3225                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3226                 hdev->rst_stats.flr_rst_cnt++;
3227                 break;
3228         case HNAE3_IMP_RESET:
3229                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3230                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3231                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3232                 break;
3233         default:
3234                 break;
3235         }
3236
3237         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3238
3239         return ret;
3240 }
3241
3242 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3243 {
3244 #define MAX_RESET_FAIL_CNT 5
3245 #define RESET_UPGRADE_DELAY_SEC 10
3246
3247         if (hdev->reset_pending) {
3248                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3249                          hdev->reset_pending);
3250                 return true;
3251         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3252                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3253                     BIT(HCLGE_IMP_RESET_BIT))) {
3254                 dev_info(&hdev->pdev->dev,
3255                          "reset failed because IMP Reset is pending\n");
3256                 hclge_clear_reset_cause(hdev);
3257                 return false;
3258         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3259                 hdev->reset_fail_cnt++;
3260                 if (is_timeout) {
3261                         set_bit(hdev->reset_type, &hdev->reset_pending);
3262                         dev_info(&hdev->pdev->dev,
3263                                  "re-schedule to wait for hw reset done\n");
3264                         return true;
3265                 }
3266
3267                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3268                 hclge_clear_reset_cause(hdev);
3269                 mod_timer(&hdev->reset_timer,
3270                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3271
3272                 return false;
3273         }
3274
3275         hclge_clear_reset_cause(hdev);
3276         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3277         return false;
3278 }
3279
3280 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3281 {
3282         int ret = 0;
3283
3284         switch (hdev->reset_type) {
3285         case HNAE3_FUNC_RESET:
3286                 /* fall through */
3287         case HNAE3_FLR_RESET:
3288                 ret = hclge_set_all_vf_rst(hdev, false);
3289                 break;
3290         default:
3291                 break;
3292         }
3293
3294         return ret;
3295 }
3296
3297 static void hclge_reset(struct hclge_dev *hdev)
3298 {
3299         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3300         bool is_timeout = false;
3301         int ret;
3302
3303         /* Initialize ae_dev reset status as well, in case enet layer wants to
3304          * know if device is undergoing reset
3305          */
3306         ae_dev->reset_type = hdev->reset_type;
3307         hdev->rst_stats.reset_cnt++;
3308         /* perform reset of the stack & ae device for a client */
3309         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3310         if (ret)
3311                 goto err_reset;
3312
3313         ret = hclge_reset_prepare_down(hdev);
3314         if (ret)
3315                 goto err_reset;
3316
3317         rtnl_lock();
3318         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3319         if (ret)
3320                 goto err_reset_lock;
3321
3322         rtnl_unlock();
3323
3324         ret = hclge_reset_prepare_wait(hdev);
3325         if (ret)
3326                 goto err_reset;
3327
3328         if (hclge_reset_wait(hdev)) {
3329                 is_timeout = true;
3330                 goto err_reset;
3331         }
3332
3333         hdev->rst_stats.hw_reset_done_cnt++;
3334
3335         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3336         if (ret)
3337                 goto err_reset;
3338
3339         rtnl_lock();
3340         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3341         if (ret)
3342                 goto err_reset_lock;
3343
3344         ret = hclge_reset_ae_dev(hdev->ae_dev);
3345         if (ret)
3346                 goto err_reset_lock;
3347
3348         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3349         if (ret)
3350                 goto err_reset_lock;
3351
3352         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3353         if (ret)
3354                 goto err_reset_lock;
3355
3356         hclge_clear_reset_cause(hdev);
3357
3358         ret = hclge_reset_prepare_up(hdev);
3359         if (ret)
3360                 goto err_reset_lock;
3361
3362         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3363         if (ret)
3364                 goto err_reset_lock;
3365
3366         rtnl_unlock();
3367
3368         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3369         if (ret)
3370                 goto err_reset;
3371
3372         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3373         if (ret)
3374                 goto err_reset;
3375
3376         hdev->last_reset_time = jiffies;
3377         hdev->reset_fail_cnt = 0;
3378         hdev->rst_stats.reset_done_cnt++;
3379         ae_dev->reset_type = HNAE3_NONE_RESET;
3380         del_timer(&hdev->reset_timer);
3381
3382         return;
3383
3384 err_reset_lock:
3385         rtnl_unlock();
3386 err_reset:
3387         if (hclge_reset_err_handle(hdev, is_timeout))
3388                 hclge_reset_task_schedule(hdev);
3389 }
3390
3391 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3392 {
3393         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3394         struct hclge_dev *hdev = ae_dev->priv;
3395
3396         /* We might end up getting called broadly because of the 2 cases below:
3397          * 1. A recoverable error was conveyed through APEI and the only way to
3398          *    bring back normalcy is to reset.
3399          * 2. A new reset request from the stack due to timeout
3400          *
3401          * For the first case, the error event might not have an ae handle
3402          * available. Check if this is a new reset request and we are not here
3403          * just because the last reset attempt did not succeed and the watchdog
3404          * hit us again. We will know this if the last reset request did not
3405          * occur very recently (watchdog timer = 5*HZ, so check after a
3406          * sufficiently large time, say 4*5*HZ). For a new request we reset the
3407          * "reset level" to PF reset. And if it is a repeat of the most recent
3408          * request then we want to make sure we throttle the reset request;
3409          * therefore, we will not allow it again before 3*HZ has passed.
3410          */
3411         if (!handle)
3412                 handle = &hdev->vport[0].nic;
3413
3414         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3415                 return;
3416         else if (hdev->default_reset_request)
3417                 hdev->reset_level =
3418                         hclge_get_reset_level(hdev,
3419                                               &hdev->default_reset_request);
3420         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3421                 hdev->reset_level = HNAE3_FUNC_RESET;
3422
3423         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3424                  hdev->reset_level);
3425
3426         /* request reset & schedule reset task */
3427         set_bit(hdev->reset_level, &hdev->reset_request);
3428         hclge_reset_task_schedule(hdev);
3429
3430         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3431                 hdev->reset_level++;
3432 }
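
/* Illustration of the throttling in hclge_reset_event() above: a request
 * arriving within 3*HZ of the last reset is dropped; one arriving more than
 * 4*5*HZ after it is treated as new and starts at HNAE3_FUNC_RESET (unless a
 * default reset request overrides the level); and each handled request bumps
 * hdev->reset_level one step while it is below HNAE3_GLOBAL_RESET, so that
 * repeated failures escalate the reset.
 */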
3433
3434 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3435                                         enum hnae3_reset_type rst_type)
3436 {
3437         struct hclge_dev *hdev = ae_dev->priv;
3438
3439         set_bit(rst_type, &hdev->default_reset_request);
3440 }
3441
3442 static void hclge_reset_timer(struct timer_list *t)
3443 {
3444         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3445
3446         dev_info(&hdev->pdev->dev,
3447                  "triggering global reset in reset timer\n");
3448         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3449         hclge_reset_event(hdev->pdev, NULL);
3450 }
3451
3452 static void hclge_reset_subtask(struct hclge_dev *hdev)
3453 {
3454         /* check if there is any ongoing reset in the hardware. This status can
3455          * be checked from reset_pending. If there is, then we need to wait for
3456          * the hardware to complete the reset.
3457          *    a. If we are able to figure out in reasonable time that the
3458          *       hardware has fully reset, then we can proceed with the driver
3459          *       and client reset.
3460          *    b. Else, we can come back later to check this status, so
3461          *       re-schedule now.
3462          */
3463         hdev->last_reset_time = jiffies;
3464         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3465         if (hdev->reset_type != HNAE3_NONE_RESET)
3466                 hclge_reset(hdev);
3467
3468         /* check if we got any *new* reset requests to be honored */
3469         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3470         if (hdev->reset_type != HNAE3_NONE_RESET)
3471                 hclge_do_reset(hdev);
3472
3473         hdev->reset_type = HNAE3_NONE_RESET;
3474 }
3475
3476 static void hclge_reset_service_task(struct work_struct *work)
3477 {
3478         struct hclge_dev *hdev =
3479                 container_of(work, struct hclge_dev, rst_service_task);
3480
3481         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3482                 return;
3483
3484         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3485
3486         hclge_reset_subtask(hdev);
3487
3488         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3489 }
3490
3491 static void hclge_mailbox_service_task(struct work_struct *work)
3492 {
3493         struct hclge_dev *hdev =
3494                 container_of(work, struct hclge_dev, mbx_service_task);
3495
3496         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3497                 return;
3498
3499         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3500
3501         hclge_mbx_handler(hdev);
3502
3503         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3504 }
3505
3506 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3507 {
3508         int i;
3509
3510         /* start from vport 1 since the PF (vport 0) is always alive */
3511         for (i = 1; i < hdev->num_alloc_vport; i++) {
3512                 struct hclge_vport *vport = &hdev->vport[i];
3513
3514                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3515                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3516
3517                 /* If vf is not alive, set to default value */
3518                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3519                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3520         }
3521 }
3522
3523 static void hclge_service_task(struct work_struct *work)
3524 {
3525         struct hclge_dev *hdev =
3526                 container_of(work, struct hclge_dev, service_task);
3527
3528         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3529                 hclge_update_stats_for_all(hdev);
3530                 hdev->hw_stats.stats_timer = 0;
3531         }
3532
3533         hclge_update_port_info(hdev);
3534         hclge_update_link_status(hdev);
3535         hclge_update_vport_alive(hdev);
3536         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3537                 hclge_rfs_filter_expire(hdev);
3538                 hdev->fd_arfs_expire_timer = 0;
3539         }
3540         hclge_service_complete(hdev);
3541 }
3542
3543 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3544 {
3545         /* VF handle has no client */
3546         if (!handle->client)
3547                 return container_of(handle, struct hclge_vport, nic);
3548         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3549                 return container_of(handle, struct hclge_vport, roce);
3550         else
3551                 return container_of(handle, struct hclge_vport, nic);
3552 }
3553
3554 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3555                             struct hnae3_vector_info *vector_info)
3556 {
3557         struct hclge_vport *vport = hclge_get_vport(handle);
3558         struct hnae3_vector_info *vector = vector_info;
3559         struct hclge_dev *hdev = vport->back;
3560         int alloc = 0;
3561         int i, j;
3562
3563         vector_num = min(hdev->num_msi_left, vector_num);
3564
3565         for (j = 0; j < vector_num; j++) {
3566                 for (i = 1; i < hdev->num_msi; i++) {
3567                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3568                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3569                                 vector->io_addr = hdev->hw.io_base +
3570                                         HCLGE_VECTOR_REG_BASE +
3571                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3572                                         vport->vport_id *
3573                                         HCLGE_VECTOR_VF_OFFSET;
3574                                 hdev->vector_status[i] = vport->vport_id;
3575                                 hdev->vector_irq[i] = vector->vector;
3576
3577                                 vector++;
3578                                 alloc++;
3579
3580                                 break;
3581                         }
3582                 }
3583         }
3584         hdev->num_msi_left -= alloc;
3585         hdev->num_msi_used += alloc;
3586
3587         return alloc;
3588 }
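
/* A minimal usage sketch for hclge_get_vector() (hypothetical caller, names
 * for illustration only):
 *
 *	struct hnae3_vector_info info[4];
 *	int got = hclge_get_vector(handle, 4, info);
 *
 * got may be less than 4 when fewer MSI vectors are left; each returned entry
 * carries the IRQ number in info[i].vector and the per-vector control
 * register address in info[i].io_addr. Vector 0 is skipped because it is
 * reserved for the misc interrupt handled above.
 */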
3589
3590 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3591 {
3592         int i;
3593
3594         for (i = 0; i < hdev->num_msi; i++)
3595                 if (vector == hdev->vector_irq[i])
3596                         return i;
3597
3598         return -EINVAL;
3599 }
3600
3601 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3602 {
3603         struct hclge_vport *vport = hclge_get_vport(handle);
3604         struct hclge_dev *hdev = vport->back;
3605         int vector_id;
3606
3607         vector_id = hclge_get_vector_index(hdev, vector);
3608         if (vector_id < 0) {
3609                 dev_err(&hdev->pdev->dev,
3610                         "Get vector index fail. vector_id =%d\n", vector_id);
3611                 return vector_id;
3612         }
3613
3614         hclge_free_vector(hdev, vector_id);
3615
3616         return 0;
3617 }
3618
3619 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3620 {
3621         return HCLGE_RSS_KEY_SIZE;
3622 }
3623
3624 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3625 {
3626         return HCLGE_RSS_IND_TBL_SIZE;
3627 }
3628
3629 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3630                                   const u8 hfunc, const u8 *key)
3631 {
3632         struct hclge_rss_config_cmd *req;
3633         struct hclge_desc desc;
3634         int key_offset;
3635         int key_size;
3636         int ret;
3637
3638         req = (struct hclge_rss_config_cmd *)desc.data;
3639
3640         for (key_offset = 0; key_offset < 3; key_offset++) {
3641                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3642                                            false);
3643
3644                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3645                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3646
3647                 if (key_offset == 2)
3648                         key_size =
3649                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3650                 else
3651                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3652
3653                 memcpy(req->hash_key,
3654                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3655
3656                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3657                 if (ret) {
3658                         dev_err(&hdev->pdev->dev,
3659                                 "Configure RSS config fail, status = %d\n",
3660                                 ret);
3661                         return ret;
3662                 }
3663         }
3664         return 0;
3665 }
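
/* Key layout sketch for hclge_set_rss_algo_key() above: the hash key is
 * written in three descriptors; the first two each carry
 * HCLGE_RSS_HASH_KEY_NUM bytes and the last carries the remaining
 * HCLGE_RSS_KEY_SIZE - 2 * HCLGE_RSS_HASH_KEY_NUM bytes (e.g., assuming a
 * 40-byte key split into 16-byte chunks, the pieces are 16 + 16 + 8 bytes).
 */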
3666
3667 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3668 {
3669         struct hclge_rss_indirection_table_cmd *req;
3670         struct hclge_desc desc;
3671         int i, j;
3672         int ret;
3673
3674         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3675
3676         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3677                 hclge_cmd_setup_basic_desc
3678                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3679
3680                 req->start_table_index =
3681                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3682                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3683
3684                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3685                         req->rss_result[j] =
3686                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3687
3688                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689                 if (ret) {
3690                         dev_err(&hdev->pdev->dev,
3691                                 "Configure rss indir table fail,status = %d\n",
3692                                 ret);
3693                         return ret;
3694                 }
3695         }
3696         return 0;
3697 }
3698
3699 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3700                                  u16 *tc_size, u16 *tc_offset)
3701 {
3702         struct hclge_rss_tc_mode_cmd *req;
3703         struct hclge_desc desc;
3704         int ret;
3705         int i;
3706
3707         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3708         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3709
3710         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3711                 u16 mode = 0;
3712
3713                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3714                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3715                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3716                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3717                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3718
3719                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3720         }
3721
3722         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3723         if (ret)
3724                 dev_err(&hdev->pdev->dev,
3725                         "Configure rss tc mode fail, status = %d\n", ret);
3726
3727         return ret;
3728 }
3729
3730 static void hclge_get_rss_type(struct hclge_vport *vport)
3731 {
3732         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3733             vport->rss_tuple_sets.ipv4_udp_en ||
3734             vport->rss_tuple_sets.ipv4_sctp_en ||
3735             vport->rss_tuple_sets.ipv6_tcp_en ||
3736             vport->rss_tuple_sets.ipv6_udp_en ||
3737             vport->rss_tuple_sets.ipv6_sctp_en)
3738                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3739         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3740                  vport->rss_tuple_sets.ipv6_fragment_en)
3741                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3742         else
3743                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3744 }
3745
3746 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3747 {
3748         struct hclge_rss_input_tuple_cmd *req;
3749         struct hclge_desc desc;
3750         int ret;
3751
3752         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3753
3754         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3755
3756         /* Get the tuple cfg from pf */
3757         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3758         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3759         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3760         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3761         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3762         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3763         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3764         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3765         hclge_get_rss_type(&hdev->vport[0]);
3766         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3767         if (ret)
3768                 dev_err(&hdev->pdev->dev,
3769                         "Configure rss input fail, status = %d\n", ret);
3770         return ret;
3771 }
3772
3773 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3774                          u8 *key, u8 *hfunc)
3775 {
3776         struct hclge_vport *vport = hclge_get_vport(handle);
3777         int i;
3778
3779         /* Get hash algorithm */
3780         if (hfunc) {
3781                 switch (vport->rss_algo) {
3782                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3783                         *hfunc = ETH_RSS_HASH_TOP;
3784                         break;
3785                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3786                         *hfunc = ETH_RSS_HASH_XOR;
3787                         break;
3788                 default:
3789                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3790                         break;
3791                 }
3792         }
3793
3794         /* Get the RSS Key required by the user */
3795         if (key)
3796                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3797
3798         /* Get indirect table */
3799         if (indir)
3800                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3801                         indir[i] =  vport->rss_indirection_tbl[i];
3802
3803         return 0;
3804 }
3805
3806 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3807                          const  u8 *key, const  u8 hfunc)
3808 {
3809         struct hclge_vport *vport = hclge_get_vport(handle);
3810         struct hclge_dev *hdev = vport->back;
3811         u8 hash_algo;
3812         int ret, i;
3813
3814         /* Set the RSS Hash Key if specified by the user */
3815         if (key) {
3816                 switch (hfunc) {
3817                 case ETH_RSS_HASH_TOP:
3818                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3819                         break;
3820                 case ETH_RSS_HASH_XOR:
3821                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3822                         break;
3823                 case ETH_RSS_HASH_NO_CHANGE:
3824                         hash_algo = vport->rss_algo;
3825                         break;
3826                 default:
3827                         return -EINVAL;
3828                 }
3829
3830                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3831                 if (ret)
3832                         return ret;
3833
3834                 /* Update the shadow RSS key with the user specified key */
3835                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3836                 vport->rss_algo = hash_algo;
3837         }
3838
3839         /* Update the shadow RSS table with user specified qids */
3840         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3841                 vport->rss_indirection_tbl[i] = indir[i];
3842
3843         /* Update the hardware */
3844         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3845 }
3846
3847 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3848 {
3849         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3850
3851         if (nfc->data & RXH_L4_B_2_3)
3852                 hash_sets |= HCLGE_D_PORT_BIT;
3853         else
3854                 hash_sets &= ~HCLGE_D_PORT_BIT;
3855
3856         if (nfc->data & RXH_IP_SRC)
3857                 hash_sets |= HCLGE_S_IP_BIT;
3858         else
3859                 hash_sets &= ~HCLGE_S_IP_BIT;
3860
3861         if (nfc->data & RXH_IP_DST)
3862                 hash_sets |= HCLGE_D_IP_BIT;
3863         else
3864                 hash_sets &= ~HCLGE_D_IP_BIT;
3865
3866         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3867                 hash_sets |= HCLGE_V_TAG_BIT;
3868
3869         return hash_sets;
3870 }
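
/* Example mapping for hclge_get_rss_hash_bits() above: for TCP_V4_FLOW with
 * nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 the
 * result is HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT; SCTP flows additionally get HCLGE_V_TAG_BIT set.
 */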
3871
3872 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3873                                struct ethtool_rxnfc *nfc)
3874 {
3875         struct hclge_vport *vport = hclge_get_vport(handle);
3876         struct hclge_dev *hdev = vport->back;
3877         struct hclge_rss_input_tuple_cmd *req;
3878         struct hclge_desc desc;
3879         u8 tuple_sets;
3880         int ret;
3881
3882         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3883                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3884                 return -EINVAL;
3885
3886         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3888
3889         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3890         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3891         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3892         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3893         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3894         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3895         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3896         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3897
3898         tuple_sets = hclge_get_rss_hash_bits(nfc);
3899         switch (nfc->flow_type) {
3900         case TCP_V4_FLOW:
3901                 req->ipv4_tcp_en = tuple_sets;
3902                 break;
3903         case TCP_V6_FLOW:
3904                 req->ipv6_tcp_en = tuple_sets;
3905                 break;
3906         case UDP_V4_FLOW:
3907                 req->ipv4_udp_en = tuple_sets;
3908                 break;
3909         case UDP_V6_FLOW:
3910                 req->ipv6_udp_en = tuple_sets;
3911                 break;
3912         case SCTP_V4_FLOW:
3913                 req->ipv4_sctp_en = tuple_sets;
3914                 break;
3915         case SCTP_V6_FLOW:
3916                 if ((nfc->data & RXH_L4_B_0_1) ||
3917                     (nfc->data & RXH_L4_B_2_3))
3918                         return -EINVAL;
3919
3920                 req->ipv6_sctp_en = tuple_sets;
3921                 break;
3922         case IPV4_FLOW:
3923                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3924                 break;
3925         case IPV6_FLOW:
3926                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3927                 break;
3928         default:
3929                 return -EINVAL;
3930         }
3931
3932         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3933         if (ret) {
3934                 dev_err(&hdev->pdev->dev,
3935                         "Set rss tuple fail, status = %d\n", ret);
3936                 return ret;
3937         }
3938
3939         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3940         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3941         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3942         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3943         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3944         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3945         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3946         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3947         hclge_get_rss_type(vport);
3948         return 0;
3949 }
3950
3951 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3952                                struct ethtool_rxnfc *nfc)
3953 {
3954         struct hclge_vport *vport = hclge_get_vport(handle);
3955         u8 tuple_sets;
3956
3957         nfc->data = 0;
3958
3959         switch (nfc->flow_type) {
3960         case TCP_V4_FLOW:
3961                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3962                 break;
3963         case UDP_V4_FLOW:
3964                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3965                 break;
3966         case TCP_V6_FLOW:
3967                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3968                 break;
3969         case UDP_V6_FLOW:
3970                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3971                 break;
3972         case SCTP_V4_FLOW:
3973                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3974                 break;
3975         case SCTP_V6_FLOW:
3976                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3977                 break;
3978         case IPV4_FLOW:
3979         case IPV6_FLOW:
3980                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3981                 break;
3982         default:
3983                 return -EINVAL;
3984         }
3985
3986         if (!tuple_sets)
3987                 return 0;
3988
3989         if (tuple_sets & HCLGE_D_PORT_BIT)
3990                 nfc->data |= RXH_L4_B_2_3;
3991         if (tuple_sets & HCLGE_S_PORT_BIT)
3992                 nfc->data |= RXH_L4_B_0_1;
3993         if (tuple_sets & HCLGE_D_IP_BIT)
3994                 nfc->data |= RXH_IP_DST;
3995         if (tuple_sets & HCLGE_S_IP_BIT)
3996                 nfc->data |= RXH_IP_SRC;
3997
3998         return 0;
3999 }
4000
4001 static int hclge_get_tc_size(struct hnae3_handle *handle)
4002 {
4003         struct hclge_vport *vport = hclge_get_vport(handle);
4004         struct hclge_dev *hdev = vport->back;
4005
4006         return hdev->rss_size_max;
4007 }
4008
4009 int hclge_rss_init_hw(struct hclge_dev *hdev)
4010 {
4011         struct hclge_vport *vport = hdev->vport;
4012         u8 *rss_indir = vport[0].rss_indirection_tbl;
4013         u16 rss_size = vport[0].alloc_rss_size;
4014         u8 *key = vport[0].rss_hash_key;
4015         u8 hfunc = vport[0].rss_algo;
4016         u16 tc_offset[HCLGE_MAX_TC_NUM];
4017         u16 tc_valid[HCLGE_MAX_TC_NUM];
4018         u16 tc_size[HCLGE_MAX_TC_NUM];
4019         u16 roundup_size;
4020         int i, ret;
4021
4022         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4023         if (ret)
4024                 return ret;
4025
4026         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4027         if (ret)
4028                 return ret;
4029
4030         ret = hclge_set_rss_input_tuple(hdev);
4031         if (ret)
4032                 return ret;
4033
4034         /* Each TC has the same queue size, and the tc_size set to hardware is
4035          * the log2 of rss_size rounded up to a power of two; the actual queue
4036          * size is limited by the indirection table.
4037          */
4038         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4039                 dev_err(&hdev->pdev->dev,
4040                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4041                         rss_size);
4042                 return -EINVAL;
4043         }
4044
4045         roundup_size = roundup_pow_of_two(rss_size);
4046         roundup_size = ilog2(roundup_size);
4047
4048         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4049                 tc_valid[i] = 0;
4050
4051                 if (!(hdev->hw_tc_map & BIT(i)))
4052                         continue;
4053
4054                 tc_valid[i] = 1;
4055                 tc_size[i] = roundup_size;
4056                 tc_offset[i] = rss_size * i;
4057         }
4058
4059         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4060 }
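
/* Worked example for the tc_size computation above: with rss_size = 6,
 * roundup_pow_of_two(6) = 8 and ilog2(8) = 3, so tc_size[i] = 3 is written to
 * hardware (i.e. 2^3 queue entries per enabled TC) and tc_offset[i] advances
 * in steps of 6; the queues actually used are still bounded by the
 * indirection table contents.
 */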
4061
4062 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4063 {
4064         struct hclge_vport *vport = hdev->vport;
4065         int i, j;
4066
4067         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4068                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4069                         vport[j].rss_indirection_tbl[i] =
4070                                 i % vport[j].alloc_rss_size;
4071         }
4072 }
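
/* Example of the default indirection table built above: with
 * alloc_rss_size = 4, each vport's table becomes 0, 1, 2, 3, 0, 1, 2, 3, ...
 * across its HCLGE_RSS_IND_TBL_SIZE entries, spreading flows round-robin over
 * the vport's RSS queues.
 */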
4073
4074 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4075 {
4076         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4077         struct hclge_vport *vport = hdev->vport;
4078
4079         if (hdev->pdev->revision >= 0x21)
4080                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4081
4082         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4083                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4084                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4085                 vport[i].rss_tuple_sets.ipv4_udp_en =
4086                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4087                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4088                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4089                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4090                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4091                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4092                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4093                 vport[i].rss_tuple_sets.ipv6_udp_en =
4094                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4095                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4096                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4097                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4098                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4099
4100                 vport[i].rss_algo = rss_algo;
4101
4102                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4103                        HCLGE_RSS_KEY_SIZE);
4104         }
4105
4106         hclge_rss_indir_init_cfg(hdev);
4107 }
4108
4109 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4110                                 int vector_id, bool en,
4111                                 struct hnae3_ring_chain_node *ring_chain)
4112 {
4113         struct hclge_dev *hdev = vport->back;
4114         struct hnae3_ring_chain_node *node;
4115         struct hclge_desc desc;
4116         struct hclge_ctrl_vector_chain_cmd *req
4117                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4118         enum hclge_cmd_status status;
4119         enum hclge_opcode_type op;
4120         u16 tqp_type_and_id;
4121         int i;
4122
4123         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4124         hclge_cmd_setup_basic_desc(&desc, op, false);
4125         req->int_vector_id = vector_id;
4126
4127         i = 0;
4128         for (node = ring_chain; node; node = node->next) {
4129                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4130                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4131                                 HCLGE_INT_TYPE_S,
4132                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4133                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4134                                 HCLGE_TQP_ID_S, node->tqp_index);
4135                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4136                                 HCLGE_INT_GL_IDX_S,
4137                                 hnae3_get_field(node->int_gl_idx,
4138                                                 HNAE3_RING_GL_IDX_M,
4139                                                 HNAE3_RING_GL_IDX_S));
4140                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4141                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4142                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4143                         req->vfid = vport->vport_id;
4144
4145                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4146                         if (status) {
4147                                 dev_err(&hdev->pdev->dev,
4148                                         "Map TQP fail, status is %d.\n",
4149                                         status);
4150                                 return -EIO;
4151                         }
4152                         i = 0;
4153
4154                         hclge_cmd_setup_basic_desc(&desc,
4155                                                    op,
4156                                                    false);
4157                         req->int_vector_id = vector_id;
4158                 }
4159         }
4160
4161         if (i > 0) {
4162                 req->int_cause_num = i;
4163                 req->vfid = vport->vport_id;
4164                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4165                 if (status) {
4166                         dev_err(&hdev->pdev->dev,
4167                                 "Map TQP fail, status is %d.\n", status);
4168                         return -EIO;
4169                 }
4170         }
4171
4172         return 0;
4173 }
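
/* Note on the batching in hclge_bind_ring_with_vector() above: ring chain
 * nodes are packed HCLGE_VECTOR_ELEMENTS_PER_CMD entries per command
 * descriptor; once a descriptor is full it is sent and a fresh one is set up
 * for the same vector, and any remaining partial batch is flushed by the
 * "if (i > 0)" block after the loop.
 */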
4174
4175 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4176                                     int vector,
4177                                     struct hnae3_ring_chain_node *ring_chain)
4178 {
4179         struct hclge_vport *vport = hclge_get_vport(handle);
4180         struct hclge_dev *hdev = vport->back;
4181         int vector_id;
4182
4183         vector_id = hclge_get_vector_index(hdev, vector);
4184         if (vector_id < 0) {
4185                 dev_err(&hdev->pdev->dev,
4186                         "Get vector index fail. vector_id =%d\n", vector_id);
4187                 return vector_id;
4188         }
4189
4190         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4191 }
4192
4193 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4194                                        int vector,
4195                                        struct hnae3_ring_chain_node *ring_chain)
4196 {
4197         struct hclge_vport *vport = hclge_get_vport(handle);
4198         struct hclge_dev *hdev = vport->back;
4199         int vector_id, ret;
4200
4201         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4202                 return 0;
4203
4204         vector_id = hclge_get_vector_index(hdev, vector);
4205         if (vector_id < 0) {
4206                 dev_err(&handle->pdev->dev,
4207                         "Get vector index fail. ret =%d\n", vector_id);
4208                 return vector_id;
4209         }
4210
4211         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4212         if (ret)
4213                 dev_err(&handle->pdev->dev,
4214                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4215                         vector_id,
4216                         ret);
4217
4218         return ret;
4219 }
4220
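/* Program the unicast/multicast/broadcast promiscuous flags described by
 * @param for the vport identified by param->vf_id.
 */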
4221 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4222                                struct hclge_promisc_param *param)
4223 {
4224         struct hclge_promisc_cfg_cmd *req;
4225         struct hclge_desc desc;
4226         int ret;
4227
4228         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4229
4230         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4231         req->vf_id = param->vf_id;
4232
4233         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4234          * pdev revision(0x20); newer revisions support them. Setting
4235          * these two fields does not cause the firmware to return an
4236          * error on revision(0x20).
4237          */
4238         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4239                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4240
4241         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4242         if (ret)
4243                 dev_err(&hdev->pdev->dev,
4244                         "Set promisc mode fail, status is %d.\n", ret);
4245
4246         return ret;
4247 }
4248
4249 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4250                               bool en_mc, bool en_bc, int vport_id)
4251 {
4252         if (!param)
4253                 return;
4254
4255         memset(param, 0, sizeof(struct hclge_promisc_param));
4256         if (en_uc)
4257                 param->enable = HCLGE_PROMISC_EN_UC;
4258         if (en_mc)
4259                 param->enable |= HCLGE_PROMISC_EN_MC;
4260         if (en_bc)
4261                 param->enable |= HCLGE_PROMISC_EN_BC;
4262         param->vf_id = vport_id;
4263 }
4264
4265 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4266                                   bool en_mc_pmc)
4267 {
4268         struct hclge_vport *vport = hclge_get_vport(handle);
4269         struct hclge_dev *hdev = vport->back;
4270         struct hclge_promisc_param param;
4271         bool en_bc_pmc = true;
4272
4273         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4274          * is always bypassed. So broadcast promisc should stay disabled until
4275          * the user enables promisc mode.
4276          */
4277         if (handle->pdev->revision == 0x20)
4278                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4279
4280         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4281                                  vport->vport_id);
4282         return hclge_cmd_set_promisc_mode(hdev, &param);
4283 }
4284
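/* Query the flow director TCAM mode (key depth/width) from firmware; the
 * mode determines the maximum key length available to stage 1.
 */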
4285 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4286 {
4287         struct hclge_get_fd_mode_cmd *req;
4288         struct hclge_desc desc;
4289         int ret;
4290
4291         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4292
4293         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4294
4295         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4296         if (ret) {
4297                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4298                 return ret;
4299         }
4300
4301         *fd_mode = req->mode;
4302
4303         return ret;
4304 }
4305
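/* Query how many rule entries and counters firmware has allocated to each
 * flow director stage.
 */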
4306 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4307                                    u32 *stage1_entry_num,
4308                                    u32 *stage2_entry_num,
4309                                    u16 *stage1_counter_num,
4310                                    u16 *stage2_counter_num)
4311 {
4312         struct hclge_get_fd_allocation_cmd *req;
4313         struct hclge_desc desc;
4314         int ret;
4315
4316         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4317
4318         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4319
4320         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4321         if (ret) {
4322                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4323                         ret);
4324                 return ret;
4325         }
4326
4327         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4328         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4329         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4330         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4331
4332         return ret;
4333 }
4334
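/* Program the key layout of a flow director stage: key selection, IPv6 word
 * enables, and the tuple/meta data masks derived from the active bits.
 */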
4335 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4336 {
4337         struct hclge_set_fd_key_config_cmd *req;
4338         struct hclge_fd_key_cfg *stage;
4339         struct hclge_desc desc;
4340         int ret;
4341
4342         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4343
4344         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4345         stage = &hdev->fd_cfg.key_cfg[stage_num];
4346         req->stage = stage_num;
4347         req->key_select = stage->key_sel;
4348         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4349         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4350         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4351         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4352         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4353         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4354
4355         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4356         if (ret)
4357                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4358
4359         return ret;
4360 }
4361
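/* Initialize the flow director: query the TCAM mode, derive the maximum key
 * length, choose the stage 1 tuples and meta data fields, query the
 * rule/counter allocation and program the stage 1 key layout.
 */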
4362 static int hclge_init_fd_config(struct hclge_dev *hdev)
4363 {
4364 #define LOW_2_WORDS             0x03
4365         struct hclge_fd_key_cfg *key_cfg;
4366         int ret;
4367
4368         if (!hnae3_dev_fd_supported(hdev))
4369                 return 0;
4370
4371         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4372         if (ret)
4373                 return ret;
4374
4375         switch (hdev->fd_cfg.fd_mode) {
4376         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4377                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4378                 break;
4379         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4380                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4381                 break;
4382         default:
4383                 dev_err(&hdev->pdev->dev,
4384                         "Unsupported flow director mode %d\n",
4385                         hdev->fd_cfg.fd_mode);
4386                 return -EOPNOTSUPP;
4387         }
4388
4389         hdev->fd_cfg.proto_support =
4390                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4391                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4392         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4393         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4394         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4395         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4396         key_cfg->outer_sipv6_word_en = 0;
4397         key_cfg->outer_dipv6_word_en = 0;
4398
4399         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4400                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4401                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4402                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4403
4404         /* If the max 400-bit key is used, tuples for ether type can be supported */
4405         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4406                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4407                 key_cfg->tuple_active |=
4408                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4409         }
4410
4411         /* roce_type is used to filter roce frames
4412          * dst_vport is used to specify the vport the rule applies to
4413          */
4414         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4415
4416         ret = hclge_get_fd_allocation(hdev,
4417                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4418                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4419                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4420                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4421         if (ret)
4422                 return ret;
4423
4424         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4425 }
4426
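/* Write one flow director TCAM entry at @loc using three chained descriptors.
 * @sel_x selects whether the X or Y half of the key is written and @is_add
 * controls the entry valid bit on the X write; a NULL @key (with is_add
 * false) is used to invalidate an entry.
 */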
4427 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4428                                 int loc, u8 *key, bool is_add)
4429 {
4430         struct hclge_fd_tcam_config_1_cmd *req1;
4431         struct hclge_fd_tcam_config_2_cmd *req2;
4432         struct hclge_fd_tcam_config_3_cmd *req3;
4433         struct hclge_desc desc[3];
4434         int ret;
4435
4436         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4437         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4438         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4439         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4440         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4441
4442         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4443         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4444         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4445
4446         req1->stage = stage;
4447         req1->xy_sel = sel_x ? 1 : 0;
4448         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4449         req1->index = cpu_to_le32(loc);
4450         req1->entry_vld = sel_x ? is_add : 0;
4451
4452         if (key) {
4453                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4454                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4455                        sizeof(req2->tcam_data));
4456                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4457                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4458         }
4459
4460         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4461         if (ret)
4462                 dev_err(&hdev->pdev->dev,
4463                         "config tcam key fail, ret=%d\n",
4464                         ret);
4465
4466         return ret;
4467 }
4468
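/* Program the action data for the flow director entry at @loc: drop or
 * forward to a queue, optional counter, next-stage key and rule id
 * write-back.
 */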
4469 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4470                               struct hclge_fd_ad_data *action)
4471 {
4472         struct hclge_fd_ad_config_cmd *req;
4473         struct hclge_desc desc;
4474         u64 ad_data = 0;
4475         int ret;
4476
4477         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4478
4479         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4480         req->index = cpu_to_le32(loc);
4481         req->stage = stage;
4482
4483         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4484                       action->write_rule_id_to_bd);
4485         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4486                         action->rule_id);
4487         ad_data <<= 32;
4488         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4489         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4490                       action->forward_to_direct_queue);
4491         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4492                         action->queue_id);
4493         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4494         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4495                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4496         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4497         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4498                         action->counter_id);
4499
4500         req->ad_data = cpu_to_le64(ad_data);
4501         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4502         if (ret)
4503                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4504
4505         return ret;
4506 }
4507
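/* Convert one tuple of @rule into its TCAM X/Y key representation via
 * calc_x()/calc_y(). Returns true if the tuple occupies space in the key, in
 * which case the caller advances its key pointers; tuples marked as unused
 * occupy space but keep their zero-initialized key bytes.
 */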
4508 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4509                                    struct hclge_fd_rule *rule)
4510 {
4511         u16 tmp_x_s, tmp_y_s;
4512         u32 tmp_x_l, tmp_y_l;
4513         int i;
4514
4515         if (rule->unused_tuple & tuple_bit)
4516                 return true;
4517
4518         switch (tuple_bit) {
4519         case 0:
4520                 return false;
4521         case BIT(INNER_DST_MAC):
4522                 for (i = 0; i < 6; i++) {
4523                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4524                                rule->tuples_mask.dst_mac[i]);
4525                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4526                                rule->tuples_mask.dst_mac[i]);
4527                 }
4528
4529                 return true;
4530         case BIT(INNER_SRC_MAC):
4531                 for (i = 0; i < 6; i++) {
4532                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4533                                rule->tuples_mask.src_mac[i]);
4534                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4535                                rule->tuples_mask.src_mac[i]);
4536                 }
4537
4538                 return true;
4539         case BIT(INNER_VLAN_TAG_FST):
4540                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4541                        rule->tuples_mask.vlan_tag1);
4542                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4543                        rule->tuples_mask.vlan_tag1);
4544                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4545                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4546
4547                 return true;
4548         case BIT(INNER_ETH_TYPE):
4549                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4550                        rule->tuples_mask.ether_proto);
4551                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4552                        rule->tuples_mask.ether_proto);
4553                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4554                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4555
4556                 return true;
4557         case BIT(INNER_IP_TOS):
4558                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4559                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4560
4561                 return true;
4562         case BIT(INNER_IP_PROTO):
4563                 calc_x(*key_x, rule->tuples.ip_proto,
4564                        rule->tuples_mask.ip_proto);
4565                 calc_y(*key_y, rule->tuples.ip_proto,
4566                        rule->tuples_mask.ip_proto);
4567
4568                 return true;
4569         case BIT(INNER_SRC_IP):
4570                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4571                        rule->tuples_mask.src_ip[3]);
4572                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4573                        rule->tuples_mask.src_ip[3]);
4574                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4575                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4576
4577                 return true;
4578         case BIT(INNER_DST_IP):
4579                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4580                        rule->tuples_mask.dst_ip[3]);
4581                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4582                        rule->tuples_mask.dst_ip[3]);
4583                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4584                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4585
4586                 return true;
4587         case BIT(INNER_SRC_PORT):
4588                 calc_x(tmp_x_s, rule->tuples.src_port,
4589                        rule->tuples_mask.src_port);
4590                 calc_y(tmp_y_s, rule->tuples.src_port,
4591                        rule->tuples_mask.src_port);
4592                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4593                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4594
4595                 return true;
4596         case BIT(INNER_DST_PORT):
4597                 calc_x(tmp_x_s, rule->tuples.dst_port,
4598                        rule->tuples_mask.dst_port);
4599                 calc_y(tmp_y_s, rule->tuples.dst_port,
4600                        rule->tuples_mask.dst_port);
4601                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4602                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4603
4604                 return true;
4605         default:
4606                 return false;
4607         }
4608 }
4609
4610 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4611                                  u8 vf_id, u8 network_port_id)
4612 {
4613         u32 port_number = 0;
4614
4615         if (port_type == HOST_PORT) {
4616                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4617                                 pf_id);
4618                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4619                                 vf_id);
4620                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4621         } else {
4622                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4623                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4624                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4625         }
4626
4627         return port_number;
4628 }
4629
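/* Pack the active meta data fields (e.g. ROCE_TYPE and DST_VPORT) into a
 * 32-bit word, convert it to its X/Y representation and store it
 * left-aligned in the meta data region of the key.
 */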
4630 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4631                                        __le32 *key_x, __le32 *key_y,
4632                                        struct hclge_fd_rule *rule)
4633 {
4634         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4635         u8 cur_pos = 0, tuple_size, shift_bits;
4636         int i;
4637
4638         for (i = 0; i < MAX_META_DATA; i++) {
4639                 tuple_size = meta_data_key_info[i].key_length;
4640                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4641
4642                 switch (tuple_bit) {
4643                 case BIT(ROCE_TYPE):
4644                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4645                         cur_pos += tuple_size;
4646                         break;
4647                 case BIT(DST_VPORT):
4648                         port_number = hclge_get_port_number(HOST_PORT, 0,
4649                                                             rule->vf_id, 0);
4650                         hnae3_set_field(meta_data,
4651                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4652                                         cur_pos, port_number);
4653                         cur_pos += tuple_size;
4654                         break;
4655                 default:
4656                         break;
4657                 }
4658         }
4659
4660         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4661         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4662         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4663
4664         *key_x = cpu_to_le32(tmp_x << shift_bits);
4665         *key_y = cpu_to_le32(tmp_y << shift_bits);
4666 }
4667
4668 /* A complete key is the combination of the meta data key and the tuple key.
4669  * The meta data key is stored in the MSB region, the tuple key is stored in
4670  * the LSB region, and unused bits are filled with 0.
4671  */
4672 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4673                             struct hclge_fd_rule *rule)
4674 {
4675         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4676         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4677         u8 *cur_key_x, *cur_key_y;
4678         int i, ret, tuple_size;
4679         u8 meta_data_region;
4680
4681         memset(key_x, 0, sizeof(key_x));
4682         memset(key_y, 0, sizeof(key_y));
4683         cur_key_x = key_x;
4684         cur_key_y = key_y;
4685
4686         for (i = 0; i < MAX_TUPLE; i++) {
4687                 bool tuple_valid;
4688                 u32 check_tuple;
4689
4690                 tuple_size = tuple_key_info[i].key_length / 8;
4691                 check_tuple = key_cfg->tuple_active & BIT(i);
4692
4693                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4694                                                      cur_key_y, rule);
4695                 if (tuple_valid) {
4696                         cur_key_x += tuple_size;
4697                         cur_key_y += tuple_size;
4698                 }
4699         }
4700
4701         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4702                         MAX_META_DATA_LENGTH / 8;
4703
4704         hclge_fd_convert_meta_data(key_cfg,
4705                                    (__le32 *)(key_x + meta_data_region),
4706                                    (__le32 *)(key_y + meta_data_region),
4707                                    rule);
4708
4709         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4710                                    true);
4711         if (ret) {
4712                 dev_err(&hdev->pdev->dev,
4713                         "fd key_y config fail, loc=%d, ret=%d\n",
4714                         rule->location, ret);
4715                 return ret;
4716         }
4717
4718         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4719                                    true);
4720         if (ret)
4721                 dev_err(&hdev->pdev->dev,
4722                         "fd key_x config fail, loc=%d, ret=%d\n",
4723                         rule->location, ret);
4724         return ret;
4725 }
4726
4727 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4728                                struct hclge_fd_rule *rule)
4729 {
4730         struct hclge_fd_ad_data ad_data;
4731
4732         ad_data.ad_id = rule->location;
4733
4734         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4735                 ad_data.drop_packet = true;
4736                 ad_data.forward_to_direct_queue = false;
4737                 ad_data.queue_id = 0;
4738         } else {
4739                 ad_data.drop_packet = false;
4740                 ad_data.forward_to_direct_queue = true;
4741                 ad_data.queue_id = rule->queue_id;
4742         }
4743
4744         ad_data.use_counter = false;
4745         ad_data.counter_id = 0;
4746
4747         ad_data.use_next_stage = false;
4748         ad_data.next_input_key = 0;
4749
4750         ad_data.write_rule_id_to_bd = true;
4751         ad_data.rule_id = rule->location;
4752
4753         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4754 }
4755
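/* Validate an ethtool flow spec against the flow director capabilities and
 * record in @unused which tuples the user left unspecified, for use when the
 * rule key is built.
 */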
4756 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4757                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4758 {
4759         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4760         struct ethtool_usrip4_spec *usr_ip4_spec;
4761         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4762         struct ethtool_usrip6_spec *usr_ip6_spec;
4763         struct ethhdr *ether_spec;
4764
4765         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4766                 return -EINVAL;
4767
4768         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4769                 return -EOPNOTSUPP;
4770
4771         if ((fs->flow_type & FLOW_EXT) &&
4772             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4773                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4774                 return -EOPNOTSUPP;
4775         }
4776
4777         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4778         case SCTP_V4_FLOW:
4779         case TCP_V4_FLOW:
4780         case UDP_V4_FLOW:
4781                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4782                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4783
4784                 if (!tcp_ip4_spec->ip4src)
4785                         *unused |= BIT(INNER_SRC_IP);
4786
4787                 if (!tcp_ip4_spec->ip4dst)
4788                         *unused |= BIT(INNER_DST_IP);
4789
4790                 if (!tcp_ip4_spec->psrc)
4791                         *unused |= BIT(INNER_SRC_PORT);
4792
4793                 if (!tcp_ip4_spec->pdst)
4794                         *unused |= BIT(INNER_DST_PORT);
4795
4796                 if (!tcp_ip4_spec->tos)
4797                         *unused |= BIT(INNER_IP_TOS);
4798
4799                 break;
4800         case IP_USER_FLOW:
4801                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4802                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4803                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4804
4805                 if (!usr_ip4_spec->ip4src)
4806                         *unused |= BIT(INNER_SRC_IP);
4807
4808                 if (!usr_ip4_spec->ip4dst)
4809                         *unused |= BIT(INNER_DST_IP);
4810
4811                 if (!usr_ip4_spec->tos)
4812                         *unused |= BIT(INNER_IP_TOS);
4813
4814                 if (!usr_ip4_spec->proto)
4815                         *unused |= BIT(INNER_IP_PROTO);
4816
4817                 if (usr_ip4_spec->l4_4_bytes)
4818                         return -EOPNOTSUPP;
4819
4820                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4821                         return -EOPNOTSUPP;
4822
4823                 break;
4824         case SCTP_V6_FLOW:
4825         case TCP_V6_FLOW:
4826         case UDP_V6_FLOW:
4827                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4828                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4829                         BIT(INNER_IP_TOS);
4830
4831                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4832                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4833                         *unused |= BIT(INNER_SRC_IP);
4834
4835                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4836                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4837                         *unused |= BIT(INNER_DST_IP);
4838
4839                 if (!tcp_ip6_spec->psrc)
4840                         *unused |= BIT(INNER_SRC_PORT);
4841
4842                 if (!tcp_ip6_spec->pdst)
4843                         *unused |= BIT(INNER_DST_PORT);
4844
4845                 if (tcp_ip6_spec->tclass)
4846                         return -EOPNOTSUPP;
4847
4848                 break;
4849         case IPV6_USER_FLOW:
4850                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4851                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4852                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4853                         BIT(INNER_DST_PORT);
4854
4855                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4856                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4857                         *unused |= BIT(INNER_SRC_IP);
4858
4859                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4860                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4861                         *unused |= BIT(INNER_DST_IP);
4862
4863                 if (!usr_ip6_spec->l4_proto)
4864                         *unused |= BIT(INNER_IP_PROTO);
4865
4866                 if (usr_ip6_spec->tclass)
4867                         return -EOPNOTSUPP;
4868
4869                 if (usr_ip6_spec->l4_4_bytes)
4870                         return -EOPNOTSUPP;
4871
4872                 break;
4873         case ETHER_FLOW:
4874                 ether_spec = &fs->h_u.ether_spec;
4875                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4876                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4877                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4878
4879                 if (is_zero_ether_addr(ether_spec->h_source))
4880                         *unused |= BIT(INNER_SRC_MAC);
4881
4882                 if (is_zero_ether_addr(ether_spec->h_dest))
4883                         *unused |= BIT(INNER_DST_MAC);
4884
4885                 if (!ether_spec->h_proto)
4886                         *unused |= BIT(INNER_ETH_TYPE);
4887
4888                 break;
4889         default:
4890                 return -EOPNOTSUPP;
4891         }
4892
4893         if ((fs->flow_type & FLOW_EXT)) {
4894                 if (fs->h_ext.vlan_etype)
4895                         return -EOPNOTSUPP;
4896                 if (!fs->h_ext.vlan_tci)
4897                         *unused |= BIT(INNER_VLAN_TAG_FST);
4898
4899                 if (fs->m_ext.vlan_tci) {
4900                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4901                                 return -EINVAL;
4902                 }
4903         } else {
4904                 *unused |= BIT(INNER_VLAN_TAG_FST);
4905         }
4906
4907         if (fs->flow_type & FLOW_MAC_EXT) {
4908                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4909                         return -EOPNOTSUPP;
4910
4911                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4912                         *unused |= BIT(INNER_DST_MAC);
4913                 else
4914                         *unused &= ~(BIT(INNER_DST_MAC));
4915         }
4916
4917         return 0;
4918 }
4919
4920 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4921 {
4922         struct hclge_fd_rule *rule = NULL;
4923         struct hlist_node *node2;
4924
4925         spin_lock_bh(&hdev->fd_rule_lock);
4926         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4927                 if (rule->location >= location)
4928                         break;
4929         }
4930
4931         spin_unlock_bh(&hdev->fd_rule_lock);
4932
4933         return rule && rule->location == location;
4934 }
4935
4936 /* Make sure the caller holds fd_rule_lock before calling this function */
4937 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4938                                      struct hclge_fd_rule *new_rule,
4939                                      u16 location,
4940                                      bool is_add)
4941 {
4942         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4943         struct hlist_node *node2;
4944
4945         if (is_add && !new_rule)
4946                 return -EINVAL;
4947
4948         hlist_for_each_entry_safe(rule, node2,
4949                                   &hdev->fd_rule_list, rule_node) {
4950                 if (rule->location >= location)
4951                         break;
4952                 parent = rule;
4953         }
4954
4955         if (rule && rule->location == location) {
4956                 hlist_del(&rule->rule_node);
4957                 kfree(rule);
4958                 hdev->hclge_fd_rule_num--;
4959
4960                 if (!is_add) {
4961                         if (!hdev->hclge_fd_rule_num)
4962                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4963                         clear_bit(location, hdev->fd_bmap);
4964
4965                         return 0;
4966                 }
4967         } else if (!is_add) {
4968                 dev_err(&hdev->pdev->dev,
4969                         "delete failed, rule %d does not exist\n",
4970                         location);
4971                 return -EINVAL;
4972         }
4973
4974         INIT_HLIST_NODE(&new_rule->rule_node);
4975
4976         if (parent)
4977                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4978         else
4979                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4980
4981         set_bit(location, hdev->fd_bmap);
4982         hdev->hclge_fd_rule_num++;
4983         hdev->fd_active_type = new_rule->rule_type;
4984
4985         return 0;
4986 }
4987
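/* Translate an ethtool flow spec into the driver's internal tuple/mask
 * representation, converting multi-byte fields to CPU byte order.
 */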
4988 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4989                               struct ethtool_rx_flow_spec *fs,
4990                               struct hclge_fd_rule *rule)
4991 {
4992         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4993
4994         switch (flow_type) {
4995         case SCTP_V4_FLOW:
4996         case TCP_V4_FLOW:
4997         case UDP_V4_FLOW:
4998                 rule->tuples.src_ip[3] =
4999                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5000                 rule->tuples_mask.src_ip[3] =
5001                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5002
5003                 rule->tuples.dst_ip[3] =
5004                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5005                 rule->tuples_mask.dst_ip[3] =
5006                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5007
5008                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5009                 rule->tuples_mask.src_port =
5010                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5011
5012                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5013                 rule->tuples_mask.dst_port =
5014                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5015
5016                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5017                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5018
5019                 rule->tuples.ether_proto = ETH_P_IP;
5020                 rule->tuples_mask.ether_proto = 0xFFFF;
5021
5022                 break;
5023         case IP_USER_FLOW:
5024                 rule->tuples.src_ip[3] =
5025                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5026                 rule->tuples_mask.src_ip[3] =
5027                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5028
5029                 rule->tuples.dst_ip[3] =
5030                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5031                 rule->tuples_mask.dst_ip[3] =
5032                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5033
5034                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5035                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5036
5037                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5038                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5039
5040                 rule->tuples.ether_proto = ETH_P_IP;
5041                 rule->tuples_mask.ether_proto = 0xFFFF;
5042
5043                 break;
5044         case SCTP_V6_FLOW:
5045         case TCP_V6_FLOW:
5046         case UDP_V6_FLOW:
5047                 be32_to_cpu_array(rule->tuples.src_ip,
5048                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5049                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5050                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5051
5052                 be32_to_cpu_array(rule->tuples.dst_ip,
5053                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5054                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5055                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5056
5057                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5058                 rule->tuples_mask.src_port =
5059                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5060
5061                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5062                 rule->tuples_mask.dst_port =
5063                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5064
5065                 rule->tuples.ether_proto = ETH_P_IPV6;
5066                 rule->tuples_mask.ether_proto = 0xFFFF;
5067
5068                 break;
5069         case IPV6_USER_FLOW:
5070                 be32_to_cpu_array(rule->tuples.src_ip,
5071                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5072                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5073                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5074
5075                 be32_to_cpu_array(rule->tuples.dst_ip,
5076                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5077                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5078                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5079
5080                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5081                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5082
5083                 rule->tuples.ether_proto = ETH_P_IPV6;
5084                 rule->tuples_mask.ether_proto = 0xFFFF;
5085
5086                 break;
5087         case ETHER_FLOW:
5088                 ether_addr_copy(rule->tuples.src_mac,
5089                                 fs->h_u.ether_spec.h_source);
5090                 ether_addr_copy(rule->tuples_mask.src_mac,
5091                                 fs->m_u.ether_spec.h_source);
5092
5093                 ether_addr_copy(rule->tuples.dst_mac,
5094                                 fs->h_u.ether_spec.h_dest);
5095                 ether_addr_copy(rule->tuples_mask.dst_mac,
5096                                 fs->m_u.ether_spec.h_dest);
5097
5098                 rule->tuples.ether_proto =
5099                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5100                 rule->tuples_mask.ether_proto =
5101                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5102
5103                 break;
5104         default:
5105                 return -EOPNOTSUPP;
5106         }
5107
5108         switch (flow_type) {
5109         case SCTP_V4_FLOW:
5110         case SCTP_V6_FLOW:
5111                 rule->tuples.ip_proto = IPPROTO_SCTP;
5112                 rule->tuples_mask.ip_proto = 0xFF;
5113                 break;
5114         case TCP_V4_FLOW:
5115         case TCP_V6_FLOW:
5116                 rule->tuples.ip_proto = IPPROTO_TCP;
5117                 rule->tuples_mask.ip_proto = 0xFF;
5118                 break;
5119         case UDP_V4_FLOW:
5120         case UDP_V6_FLOW:
5121                 rule->tuples.ip_proto = IPPROTO_UDP;
5122                 rule->tuples_mask.ip_proto = 0xFF;
5123                 break;
5124         default:
5125                 break;
5126         }
5127
5128         if ((fs->flow_type & FLOW_EXT)) {
5129                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5130                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5131         }
5132
5133         if (fs->flow_type & FLOW_MAC_EXT) {
5134                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5135                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5136         }
5137
5138         return 0;
5139 }
5140
5141 /* Make sure the caller holds fd_rule_lock before calling this function */
5142 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5143                                 struct hclge_fd_rule *rule)
5144 {
5145         int ret;
5146
5147         if (!rule) {
5148                 dev_err(&hdev->pdev->dev,
5149                         "The flow director rule is NULL\n");
5150                 return -EINVAL;
5151         }
5152
5153         /* it never fails here, so there is no need to check the return value */
5154         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5155
5156         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5157         if (ret)
5158                 goto clear_rule;
5159
5160         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5161         if (ret)
5162                 goto clear_rule;
5163
5164         return 0;
5165
5166 clear_rule:
5167         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5168         return ret;
5169 }
5170
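/* Add a user-configured flow director rule, for example (illustrative
 * command only, the interface name is made up):
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 action 3
 * The flow spec is validated, the destination vport and queue are resolved,
 * and the resulting hclge_fd_rule is programmed into stage 1.
 */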
5171 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5172                               struct ethtool_rxnfc *cmd)
5173 {
5174         struct hclge_vport *vport = hclge_get_vport(handle);
5175         struct hclge_dev *hdev = vport->back;
5176         u16 dst_vport_id = 0, q_index = 0;
5177         struct ethtool_rx_flow_spec *fs;
5178         struct hclge_fd_rule *rule;
5179         u32 unused = 0;
5180         u8 action;
5181         int ret;
5182
5183         if (!hnae3_dev_fd_supported(hdev))
5184                 return -EOPNOTSUPP;
5185
5186         if (!hdev->fd_en) {
5187                 dev_warn(&hdev->pdev->dev,
5188                          "Please enable flow director first\n");
5189                 return -EOPNOTSUPP;
5190         }
5191
5192         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5193
5194         ret = hclge_fd_check_spec(hdev, fs, &unused);
5195         if (ret) {
5196                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5197                 return ret;
5198         }
5199
5200         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5201                 action = HCLGE_FD_ACTION_DROP_PACKET;
5202         } else {
5203                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5204                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5205                 u16 tqps;
5206
5207                 if (vf > hdev->num_req_vfs) {
5208                         dev_err(&hdev->pdev->dev,
5209                                 "Error: vf id (%d) > max vf num (%d)\n",
5210                                 vf, hdev->num_req_vfs);
5211                         return -EINVAL;
5212                 }
5213
5214                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5215                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5216
5217                 if (ring >= tqps) {
5218                         dev_err(&hdev->pdev->dev,
5219                                 "Error: queue id (%d) > max tqp num (%d)\n",
5220                                 ring, tqps - 1);
5221                         return -EINVAL;
5222                 }
5223
5224                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5225                 q_index = ring;
5226         }
5227
5228         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5229         if (!rule)
5230                 return -ENOMEM;
5231
5232         ret = hclge_fd_get_tuple(hdev, fs, rule);
5233         if (ret) {
5234                 kfree(rule);
5235                 return ret;
5236         }
5237
5238         rule->flow_type = fs->flow_type;
5239
5240         rule->location = fs->location;
5241         rule->unused_tuple = unused;
5242         rule->vf_id = dst_vport_id;
5243         rule->queue_id = q_index;
5244         rule->action = action;
5245         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5246
5247         /* To avoid rule conflicts, clear all aRFS rules when the user
5248          * configures a rule via ethtool.
5249          */
5250         hclge_clear_arfs_rules(handle);
5251
5252         spin_lock_bh(&hdev->fd_rule_lock);
5253         ret = hclge_fd_config_rule(hdev, rule);
5254
5255         spin_unlock_bh(&hdev->fd_rule_lock);
5256
5257         return ret;
5258 }
5259
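/* Delete the flow director rule at fs->location: invalidate its TCAM entry
 * and drop it from the software rule list.
 */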
5260 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5261                               struct ethtool_rxnfc *cmd)
5262 {
5263         struct hclge_vport *vport = hclge_get_vport(handle);
5264         struct hclge_dev *hdev = vport->back;
5265         struct ethtool_rx_flow_spec *fs;
5266         int ret;
5267
5268         if (!hnae3_dev_fd_supported(hdev))
5269                 return -EOPNOTSUPP;
5270
5271         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5272
5273         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5274                 return -EINVAL;
5275
5276         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5277                 dev_err(&hdev->pdev->dev,
5278                         "Delete failed, rule %d does not exist\n",
5279                         fs->location);
5280                 return -ENOENT;
5281         }
5282
5283         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5284                                    fs->location, NULL, false);
5285         if (ret)
5286                 return ret;
5287
5288         spin_lock_bh(&hdev->fd_rule_lock);
5289         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5290
5291         spin_unlock_bh(&hdev->fd_rule_lock);
5292
5293         return ret;
5294 }
5295
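/* Invalidate every flow director TCAM entry tracked in fd_bmap and, if
 * @clear_list is true, also free the software rule list.
 */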
5296 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5297                                      bool clear_list)
5298 {
5299         struct hclge_vport *vport = hclge_get_vport(handle);
5300         struct hclge_dev *hdev = vport->back;
5301         struct hclge_fd_rule *rule;
5302         struct hlist_node *node;
5303         u16 location;
5304
5305         if (!hnae3_dev_fd_supported(hdev))
5306                 return;
5307
5308         spin_lock_bh(&hdev->fd_rule_lock);
5309         for_each_set_bit(location, hdev->fd_bmap,
5310                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5311                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5312                                      NULL, false);
5313
5314         if (clear_list) {
5315                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5316                                           rule_node) {
5317                         hlist_del(&rule->rule_node);
5318                         kfree(rule);
5319                 }
5320                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5321                 hdev->hclge_fd_rule_num = 0;
5322                 bitmap_zero(hdev->fd_bmap,
5323                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5324         }
5325
5326         spin_unlock_bh(&hdev->fd_rule_lock);
5327 }
5328
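/* Re-program all flow director rules kept in the software rule list after a
 * reset; rules that cannot be restored are removed from the list.
 */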
5329 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5330 {
5331         struct hclge_vport *vport = hclge_get_vport(handle);
5332         struct hclge_dev *hdev = vport->back;
5333         struct hclge_fd_rule *rule;
5334         struct hlist_node *node;
5335         int ret;
5336
5337         /* Return ok here, because reset error handling will check this
5338          * return value. If error is returned here, the reset process will
5339          * fail.
5340          */
5341         if (!hnae3_dev_fd_supported(hdev))
5342                 return 0;
5343
5344         /* if fd is disabled, the rules should not be restored during reset */
5345         if (!hdev->fd_en)
5346                 return 0;
5347
5348         spin_lock_bh(&hdev->fd_rule_lock);
5349         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5350                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5351                 if (!ret)
5352                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5353
5354                 if (ret) {
5355                         dev_warn(&hdev->pdev->dev,
5356                                  "Restore rule %d failed, remove it\n",
5357                                  rule->location);
5358                         clear_bit(rule->location, hdev->fd_bmap);
5359                         hlist_del(&rule->rule_node);
5360                         kfree(rule);
5361                         hdev->hclge_fd_rule_num--;
5362                 }
5363         }
5364
5365         if (hdev->hclge_fd_rule_num)
5366                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5367
5368         spin_unlock_bh(&hdev->fd_rule_lock);
5369
5370         return 0;
5371 }
5372
5373 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5374                                  struct ethtool_rxnfc *cmd)
5375 {
5376         struct hclge_vport *vport = hclge_get_vport(handle);
5377         struct hclge_dev *hdev = vport->back;
5378
5379         if (!hnae3_dev_fd_supported(hdev))
5380                 return -EOPNOTSUPP;
5381
5382         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5383         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5384
5385         return 0;
5386 }
5387
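/* Report the rule stored at fs->location back to user space as an ethtool
 * flow spec.
 */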
5388 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5389                                   struct ethtool_rxnfc *cmd)
5390 {
5391         struct hclge_vport *vport = hclge_get_vport(handle);
5392         struct hclge_fd_rule *rule = NULL;
5393         struct hclge_dev *hdev = vport->back;
5394         struct ethtool_rx_flow_spec *fs;
5395         struct hlist_node *node2;
5396
5397         if (!hnae3_dev_fd_supported(hdev))
5398                 return -EOPNOTSUPP;
5399
5400         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5401
5402         spin_lock_bh(&hdev->fd_rule_lock);
5403
5404         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5405                 if (rule->location >= fs->location)
5406                         break;
5407         }
5408
5409         if (!rule || fs->location != rule->location) {
5410                 spin_unlock_bh(&hdev->fd_rule_lock);
5411
5412                 return -ENOENT;
5413         }
5414
5415         fs->flow_type = rule->flow_type;
5416         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5417         case SCTP_V4_FLOW:
5418         case TCP_V4_FLOW:
5419         case UDP_V4_FLOW:
5420                 fs->h_u.tcp_ip4_spec.ip4src =
5421                                 cpu_to_be32(rule->tuples.src_ip[3]);
5422                 fs->m_u.tcp_ip4_spec.ip4src =
5423                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5424                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5425
5426                 fs->h_u.tcp_ip4_spec.ip4dst =
5427                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5428                 fs->m_u.tcp_ip4_spec.ip4dst =
5429                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5430                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5431
5432                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5433                 fs->m_u.tcp_ip4_spec.psrc =
5434                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5435                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5436
5437                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5438                 fs->m_u.tcp_ip4_spec.pdst =
5439                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5440                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5441
5442                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5443                 fs->m_u.tcp_ip4_spec.tos =
5444                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5445                                 0 : rule->tuples_mask.ip_tos;
5446
5447                 break;
5448         case IP_USER_FLOW:
5449                 fs->h_u.usr_ip4_spec.ip4src =
5450                                 cpu_to_be32(rule->tuples.src_ip[3]);
5451                 fs->m_u.usr_ip4_spec.ip4src =
5452                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5453                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5454
5455                 fs->h_u.usr_ip4_spec.ip4dst =
5456                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5457                 fs->m_u.usr_ip4_spec.ip4dst =
5458                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5459                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5460
5461                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5462                 fs->m_u.usr_ip4_spec.tos =
5463                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5464                                 0 : rule->tuples_mask.ip_tos;
5465
5466                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5467                 fs->m_u.usr_ip4_spec.proto =
5468                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5469                                 0 : rule->tuples_mask.ip_proto;
5470
5471                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5472
5473                 break;
5474         case SCTP_V6_FLOW:
5475         case TCP_V6_FLOW:
5476         case UDP_V6_FLOW:
5477                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5478                                   rule->tuples.src_ip, 4);
5479                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5480                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5481                 else
5482                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5483                                           rule->tuples_mask.src_ip, 4);
5484
5485                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5486                                   rule->tuples.dst_ip, 4);
5487                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5488                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5489                 else
5490                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5491                                           rule->tuples_mask.dst_ip, 4);
5492
5493                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5494                 fs->m_u.tcp_ip6_spec.psrc =
5495                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5496                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5497
5498                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5499                 fs->m_u.tcp_ip6_spec.pdst =
5500                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5501                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5502
5503                 break;
5504         case IPV6_USER_FLOW:
5505                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5506                                   rule->tuples.src_ip, 4);
5507                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5508                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5509                 else
5510                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5511                                           rule->tuples_mask.src_ip, 4);
5512
5513                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5514                                   rule->tuples.dst_ip, 4);
5515                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5516                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5517                 else
5518                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5519                                           rule->tuples_mask.dst_ip, 4);
5520
5521                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5522                 fs->m_u.usr_ip6_spec.l4_proto =
5523                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5524                                 0 : rule->tuples_mask.ip_proto;
5525
5526                 break;
5527         case ETHER_FLOW:
5528                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5529                                 rule->tuples.src_mac);
5530                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5531                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5532                 else
5533                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5534                                         rule->tuples_mask.src_mac);
5535
5536                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5537                                 rule->tuples.dst_mac);
5538                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5539                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5540                 else
5541                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5542                                         rule->tuples_mask.dst_mac);
5543
5544                 fs->h_u.ether_spec.h_proto =
5545                                 cpu_to_be16(rule->tuples.ether_proto);
5546                 fs->m_u.ether_spec.h_proto =
5547                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5548                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5549
5550                 break;
5551         default:
5552                 spin_unlock_bh(&hdev->fd_rule_lock);
5553                 return -EOPNOTSUPP;
5554         }
5555
5556         if (fs->flow_type & FLOW_EXT) {
5557                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5558                 fs->m_ext.vlan_tci =
5559                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5560                                 cpu_to_be16(VLAN_VID_MASK) :
5561                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5562         }
5563
5564         if (fs->flow_type & FLOW_MAC_EXT) {
5565                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5566                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5567                         eth_zero_addr(fs->m_ext.h_dest);
5568                 else
5569                         ether_addr_copy(fs->m_ext.h_dest,
5570                                         rule->tuples_mask.dst_mac);
5571         }
5572
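        /* The upper bytes of ring_cookie carry the VF id, as computed below:
         * e.g. a rule with vf_id 2 and queue_id 5 (illustrative values) is
         * reported as ring_cookie = 5 | (2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF).
         */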
5573         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5574                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5575         } else {
5576                 u64 vf_id;
5577
5578                 fs->ring_cookie = rule->queue_id;
5579                 vf_id = rule->vf_id;
5580                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5581                 fs->ring_cookie |= vf_id;
5582         }
5583
5584         spin_unlock_bh(&hdev->fd_rule_lock);
5585
5586         return 0;
5587 }
5588
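/* Walk the flow director rule list and report each active rule's location
 * back to ethtool. Returns -EMSGSIZE when the caller's rule_locs array is
 * too small to hold all configured rules.
 */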
5589 static int hclge_get_all_rules(struct hnae3_handle *handle,
5590                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5591 {
5592         struct hclge_vport *vport = hclge_get_vport(handle);
5593         struct hclge_dev *hdev = vport->back;
5594         struct hclge_fd_rule *rule;
5595         struct hlist_node *node2;
5596         int cnt = 0;
5597
5598         if (!hnae3_dev_fd_supported(hdev))
5599                 return -EOPNOTSUPP;
5600
5601         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5602
5603         spin_lock_bh(&hdev->fd_rule_lock);
5604         hlist_for_each_entry_safe(rule, node2,
5605                                   &hdev->fd_rule_list, rule_node) {
5606                 if (cnt == cmd->rule_cnt) {
5607                         spin_unlock_bh(&hdev->fd_rule_lock);
5608                         return -EMSGSIZE;
5609                 }
5610
5611                 rule_locs[cnt] = rule->location;
5612                 cnt++;
5613         }
5614
5615         spin_unlock_bh(&hdev->fd_rule_lock);
5616
5617         cmd->rule_cnt = cnt;
5618
5619         return 0;
5620 }
5621
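/* Extract the match tuples (ether type, L4 protocol, destination port and
 * IP addresses) from the dissected flow keys. For IPv4 only src_ip[3] and
 * dst_ip[3] are used; the remaining words stay zero so the same tuple
 * layout also covers IPv6.
 */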
5622 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5623                                      struct hclge_fd_rule_tuples *tuples)
5624 {
5625         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5626         tuples->ip_proto = fkeys->basic.ip_proto;
5627         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5628
5629         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5630                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5631                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5632         } else {
5633                 memcpy(tuples->src_ip,
5634                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5635                        sizeof(tuples->src_ip));
5636                 memcpy(tuples->dst_ip,
5637                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5638                        sizeof(tuples->dst_ip));
5639         }
5640 }
5641
5642 /* traverse all rules, check whether an existing rule has the same tuples */
5643 static struct hclge_fd_rule *
5644 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5645                           const struct hclge_fd_rule_tuples *tuples)
5646 {
5647         struct hclge_fd_rule *rule = NULL;
5648         struct hlist_node *node;
5649
5650         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5651                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5652                         return rule;
5653         }
5654
5655         return NULL;
5656 }
5657
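/* Fill an aRFS rule from the extracted tuples. Source/destination MAC,
 * VLAN tag, TOS and source port are marked unused, so the TCAM match is
 * made on the IP addresses, destination port and protocol only.
 */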
5658 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5659                                      struct hclge_fd_rule *rule)
5660 {
5661         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5662                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5663                              BIT(INNER_SRC_PORT);
5664         rule->action = 0;
5665         rule->vf_id = 0;
5666         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5667         if (tuples->ether_proto == ETH_P_IP) {
5668                 if (tuples->ip_proto == IPPROTO_TCP)
5669                         rule->flow_type = TCP_V4_FLOW;
5670                 else
5671                         rule->flow_type = UDP_V4_FLOW;
5672         } else {
5673                 if (tuples->ip_proto == IPPROTO_TCP)
5674                         rule->flow_type = TCP_V6_FLOW;
5675                 else
5676                         rule->flow_type = UDP_V6_FLOW;
5677         }
5678         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5679         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5680 }
5681
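/* aRFS entry point: called from the receive flow steering path to steer an
 * individual flow to the requested queue. Returns the rule location (used
 * as the flow's filter id) on success, or a negative errno.
 */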
5682 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5683                                       u16 flow_id, struct flow_keys *fkeys)
5684 {
5685         struct hclge_vport *vport = hclge_get_vport(handle);
5686         struct hclge_fd_rule_tuples new_tuples;
5687         struct hclge_dev *hdev = vport->back;
5688         struct hclge_fd_rule *rule;
5689         u16 tmp_queue_id;
5690         u16 bit_id;
5691         int ret;
5692
5693         if (!hnae3_dev_fd_supported(hdev))
5694                 return -EOPNOTSUPP;
5695
5696         memset(&new_tuples, 0, sizeof(new_tuples));
5697         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5698
5699         spin_lock_bh(&hdev->fd_rule_lock);
5700
5701         /* when there is already an fd rule added by the user,
5702          * arfs should not take effect
5703          */
5704         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5705                 spin_unlock_bh(&hdev->fd_rule_lock);
5706
5707                 return -EOPNOTSUPP;
5708         }
5709
5710         /* check whether a flow director filter already exists for this flow:
5711          * if not, create a new filter for it;
5712          * if a filter exists with a different queue id, modify the filter;
5713          * if a filter exists with the same queue id, do nothing
5714          */
5715         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5716         if (!rule) {
5717                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5718                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5719                         spin_unlock_bh(&hdev->fd_rule_lock);
5720
5721                         return -ENOSPC;
5722                 }
5723
5724                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
5725                 if (!rule) {
5726                         spin_unlock_bh(&hdev->fd_rule_lock);
5727
5728                         return -ENOMEM;
5729                 }
5730
5731                 set_bit(bit_id, hdev->fd_bmap);
5732                 rule->location = bit_id;
5733                 rule->flow_id = flow_id;
5734                 rule->queue_id = queue_id;
5735                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5736                 ret = hclge_fd_config_rule(hdev, rule);
5737
5738                 spin_unlock_bh(&hdev->fd_rule_lock);
5739
5740                 if (ret)
5741                         return ret;
5742
5743                 return rule->location;
5744         }
5745
5746         spin_unlock_bh(&hdev->fd_rule_lock);
5747
5748         if (rule->queue_id == queue_id)
5749                 return rule->location;
5750
5751         tmp_queue_id = rule->queue_id;
5752         rule->queue_id = queue_id;
5753         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5754         if (ret) {
5755                 rule->queue_id = tmp_queue_id;
5756                 return ret;
5757         }
5758
5759         return rule->location;
5760 }
5761
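/* Periodically expire aRFS rules whose flows are no longer being steered.
 * Expired rules are unlinked and unaccounted under the rule lock, then the
 * TCAM entries are removed and the rules freed outside the spinlock.
 */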
5762 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5763 {
5764 #ifdef CONFIG_RFS_ACCEL
5765         struct hnae3_handle *handle = &hdev->vport[0].nic;
5766         struct hclge_fd_rule *rule;
5767         struct hlist_node *node;
5768         HLIST_HEAD(del_list);
5769
5770         spin_lock_bh(&hdev->fd_rule_lock);
5771         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5772                 spin_unlock_bh(&hdev->fd_rule_lock);
5773                 return;
5774         }
5775         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5776                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5777                                         rule->flow_id, rule->location)) {
5778                         hlist_del_init(&rule->rule_node);
5779                         hlist_add_head(&rule->rule_node, &del_list);
5780                         hdev->hclge_fd_rule_num--;
5781                         clear_bit(rule->location, hdev->fd_bmap);
5782                 }
5783         }
5784         spin_unlock_bh(&hdev->fd_rule_lock);
5785
5786         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5787                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5788                                      rule->location, NULL, false);
5789                 kfree(rule);
5790         }
5791 #endif
5792 }
5793
5794 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5795 {
5796 #ifdef CONFIG_RFS_ACCEL
5797         struct hclge_vport *vport = hclge_get_vport(handle);
5798         struct hclge_dev *hdev = vport->back;
5799
5800         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5801                 hclge_del_all_fd_entries(handle, true);
5802 #endif
5803 }
5804
5805 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5806 {
5807         struct hclge_vport *vport = hclge_get_vport(handle);
5808         struct hclge_dev *hdev = vport->back;
5809
5810         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5811                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5812 }
5813
5814 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5815 {
5816         struct hclge_vport *vport = hclge_get_vport(handle);
5817         struct hclge_dev *hdev = vport->back;
5818
5819         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5820 }
5821
5822 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5823 {
5824         struct hclge_vport *vport = hclge_get_vport(handle);
5825         struct hclge_dev *hdev = vport->back;
5826
5827         return hdev->rst_stats.hw_reset_done_cnt;
5828 }
5829
5830 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5831 {
5832         struct hclge_vport *vport = hclge_get_vport(handle);
5833         struct hclge_dev *hdev = vport->back;
5834         bool clear;
5835
5836         hdev->fd_en = enable;
5837         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5838         if (!enable)
5839                 hclge_del_all_fd_entries(handle, clear);
5840         else
5841                 hclge_restore_fd_entries(handle);
5842 }
5843
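/* Enable or disable MAC TX/RX together with padding, FCS and oversize
 * truncation handling; the 1588 and loopback bits are always cleared here.
 */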
5844 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5845 {
5846         struct hclge_desc desc;
5847         struct hclge_config_mac_mode_cmd *req =
5848                 (struct hclge_config_mac_mode_cmd *)desc.data;
5849         u32 loop_en = 0;
5850         int ret;
5851
5852         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5853         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5854         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5855         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5856         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5857         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5858         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5859         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5860         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5861         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5862         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5863         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5864         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5867         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5868
5869         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5870         if (ret)
5871                 dev_err(&hdev->pdev->dev,
5872                         "mac enable fail, ret =%d.\n", ret);
5873 }
5874
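/* App (MAC) loopback is configured with a read-modify-write of the MAC mode
 * command: read the current config, toggle the loopback and TX/RX enable
 * bits, then write the same descriptor back.
 */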
5875 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5876 {
5877         struct hclge_config_mac_mode_cmd *req;
5878         struct hclge_desc desc;
5879         u32 loop_en;
5880         int ret;
5881
5882         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5883         /* 1 Read out the MAC mode config at first */
5884         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5885         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5886         if (ret) {
5887                 dev_err(&hdev->pdev->dev,
5888                         "mac loopback get fail, ret =%d.\n", ret);
5889                 return ret;
5890         }
5891
5892         /* 2 Then setup the loopback flag */
5893         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5894         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5895         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5896         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5897
5898         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5899
5900         /* 3 Config mac work mode with loopback flag
5901          * and its original configuration parameters
5902          */
5903         hclge_cmd_reuse_desc(&desc, false);
5904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5905         if (ret)
5906                 dev_err(&hdev->pdev->dev,
5907                         "mac loopback set fail, ret =%d.\n", ret);
5908         return ret;
5909 }
5910
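/* Serdes loopback: program the requested loopback mode via firmware
 * command, poll until the firmware reports completion, then reconfigure
 * the MAC and wait for the expected link state.
 */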
5911 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5912                                      enum hnae3_loop loop_mode)
5913 {
5914 #define HCLGE_SERDES_RETRY_MS   10
5915 #define HCLGE_SERDES_RETRY_NUM  100
5916
5917 #define HCLGE_MAC_LINK_STATUS_MS   10
5918 #define HCLGE_MAC_LINK_STATUS_NUM  100
5919 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5920 #define HCLGE_MAC_LINK_STATUS_UP   1
5921
5922         struct hclge_serdes_lb_cmd *req;
5923         struct hclge_desc desc;
5924         int mac_link_ret = 0;
5925         int ret, i = 0;
5926         u8 loop_mode_b;
5927
5928         req = (struct hclge_serdes_lb_cmd *)desc.data;
5929         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5930
5931         switch (loop_mode) {
5932         case HNAE3_LOOP_SERIAL_SERDES:
5933                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5934                 break;
5935         case HNAE3_LOOP_PARALLEL_SERDES:
5936                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5937                 break;
5938         default:
5939                 dev_err(&hdev->pdev->dev,
5940                         "unsupported serdes loopback mode %d\n", loop_mode);
5941                 return -ENOTSUPP;
5942         }
5943
5944         if (en) {
5945                 req->enable = loop_mode_b;
5946                 req->mask = loop_mode_b;
5947                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5948         } else {
5949                 req->mask = loop_mode_b;
5950                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5951         }
5952
5953         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5954         if (ret) {
5955                 dev_err(&hdev->pdev->dev,
5956                         "serdes loopback set fail, ret = %d\n", ret);
5957                 return ret;
5958         }
5959
5960         do {
5961                 msleep(HCLGE_SERDES_RETRY_MS);
5962                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5963                                            true);
5964                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5965                 if (ret) {
5966                         dev_err(&hdev->pdev->dev,
5967                                 "serdes loopback get fail, ret = %d\n", ret);
5968                         return ret;
5969                 }
5970         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5971                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5972
5973         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5974                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5975                 return -EBUSY;
5976         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5977                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5978                 return -EIO;
5979         }
5980
5981         hclge_cfg_mac_mode(hdev, en);
5982
5983         i = 0;
5984         do {
5985                 /* serdes internal loopback, independent of the network cable */
5986                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5987                 ret = hclge_get_mac_link_status(hdev);
5988                 if (ret == mac_link_ret)
5989                         return 0;
5990         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5991
5992         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5993
5994         return -EBUSY;
5995 }
5996
5997 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5998                             int stream_id, bool enable)
5999 {
6000         struct hclge_desc desc;
6001         struct hclge_cfg_com_tqp_queue_cmd *req =
6002                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6003         int ret;
6004
6005         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6006         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6007         req->stream_id = cpu_to_le16(stream_id);
6008         req->enable |= enable << HCLGE_TQP_ENABLE_B;
6009
6010         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6011         if (ret)
6012                 dev_err(&hdev->pdev->dev,
6013                         "Tqp enable fail, status =%d.\n", ret);
6014         return ret;
6015 }
6016
6017 static int hclge_set_loopback(struct hnae3_handle *handle,
6018                               enum hnae3_loop loop_mode, bool en)
6019 {
6020         struct hclge_vport *vport = hclge_get_vport(handle);
6021         struct hnae3_knic_private_info *kinfo;
6022         struct hclge_dev *hdev = vport->back;
6023         int i, ret;
6024
6025         switch (loop_mode) {
6026         case HNAE3_LOOP_APP:
6027                 ret = hclge_set_app_loopback(hdev, en);
6028                 break;
6029         case HNAE3_LOOP_SERIAL_SERDES:
6030         case HNAE3_LOOP_PARALLEL_SERDES:
6031                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6032                 break;
6033         default:
6034                 ret = -ENOTSUPP;
6035                 dev_err(&hdev->pdev->dev,
6036                         "loop_mode %d is not supported\n", loop_mode);
6037                 break;
6038         }
6039
6040         if (ret)
6041                 return ret;
6042
6043         kinfo = &vport->nic.kinfo;
6044         for (i = 0; i < kinfo->num_tqps; i++) {
6045                 ret = hclge_tqp_enable(hdev, i, 0, en);
6046                 if (ret)
6047                         return ret;
6048         }
6049
6050         return 0;
6051 }
6052
6053 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6054 {
6055         struct hclge_vport *vport = hclge_get_vport(handle);
6056         struct hnae3_knic_private_info *kinfo;
6057         struct hnae3_queue *queue;
6058         struct hclge_tqp *tqp;
6059         int i;
6060
6061         kinfo = &vport->nic.kinfo;
6062         for (i = 0; i < kinfo->num_tqps; i++) {
6063                 queue = handle->kinfo.tqp[i];
6064                 tqp = container_of(queue, struct hclge_tqp, q);
6065                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6066         }
6067 }
6068
6069 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6070 {
6071         struct hclge_vport *vport = hclge_get_vport(handle);
6072         struct hclge_dev *hdev = vport->back;
6073
6074         if (enable) {
6075                 mod_timer(&hdev->service_timer, jiffies + HZ);
6076         } else {
6077                 del_timer_sync(&hdev->service_timer);
6078                 cancel_work_sync(&hdev->service_task);
6079                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6080         }
6081 }
6082
6083 static int hclge_ae_start(struct hnae3_handle *handle)
6084 {
6085         struct hclge_vport *vport = hclge_get_vport(handle);
6086         struct hclge_dev *hdev = vport->back;
6087
6088         /* mac enable */
6089         hclge_cfg_mac_mode(hdev, true);
6090         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6091         hdev->hw.mac.link = 0;
6092
6093         /* reset tqp stats */
6094         hclge_reset_tqp_stats(handle);
6095
6096         hclge_mac_start_phy(hdev);
6097
6098         return 0;
6099 }
6100
6101 static void hclge_ae_stop(struct hnae3_handle *handle)
6102 {
6103         struct hclge_vport *vport = hclge_get_vport(handle);
6104         struct hclge_dev *hdev = vport->back;
6105         int i;
6106
6107         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6108
6109         hclge_clear_arfs_rules(handle);
6110
6111         /* If it is not a PF reset, the firmware will disable the MAC,
6112          * so it only needs to stop the phy here.
6113          */
6114         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6115             hdev->reset_type != HNAE3_FUNC_RESET) {
6116                 hclge_mac_stop_phy(hdev);
6117                 return;
6118         }
6119
6120         for (i = 0; i < handle->kinfo.num_tqps; i++)
6121                 hclge_reset_tqp(handle, i);
6122
6123         /* Mac disable */
6124         hclge_cfg_mac_mode(hdev, false);
6125
6126         hclge_mac_stop_phy(hdev);
6127
6128         /* reset tqp stats */
6129         hclge_reset_tqp_stats(handle);
6130         hclge_update_link_status(hdev);
6131 }
6132
6133 int hclge_vport_start(struct hclge_vport *vport)
6134 {
6135         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6136         vport->last_active_jiffies = jiffies;
6137         return 0;
6138 }
6139
6140 void hclge_vport_stop(struct hclge_vport *vport)
6141 {
6142         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6143 }
6144
6145 static int hclge_client_start(struct hnae3_handle *handle)
6146 {
6147         struct hclge_vport *vport = hclge_get_vport(handle);
6148
6149         return hclge_vport_start(vport);
6150 }
6151
6152 static void hclge_client_stop(struct hnae3_handle *handle)
6153 {
6154         struct hclge_vport *vport = hclge_get_vport(handle);
6155
6156         hclge_vport_stop(vport);
6157 }
6158
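/* Translate the firmware response of a mac_vlan table command into an
 * errno. For ADD, resp_code 2/3 mean the unicast/multicast table
 * overflowed; for REMOVE and LOOKUP, resp_code 1 means the entry was not
 * found.
 */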
6159 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6160                                          u16 cmdq_resp, u8  resp_code,
6161                                          enum hclge_mac_vlan_tbl_opcode op)
6162 {
6163         struct hclge_dev *hdev = vport->back;
6164         int return_status = -EIO;
6165
6166         if (cmdq_resp) {
6167                 dev_err(&hdev->pdev->dev,
6168                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6169                         cmdq_resp);
6170                 return -EIO;
6171         }
6172
6173         if (op == HCLGE_MAC_VLAN_ADD) {
6174                 if ((!resp_code) || (resp_code == 1)) {
6175                         return_status = 0;
6176                 } else if (resp_code == 2) {
6177                         return_status = -ENOSPC;
6178                         dev_err(&hdev->pdev->dev,
6179                                 "add mac addr failed for uc_overflow.\n");
6180                 } else if (resp_code == 3) {
6181                         return_status = -ENOSPC;
6182                         dev_err(&hdev->pdev->dev,
6183                                 "add mac addr failed for mc_overflow.\n");
6184                 } else {
6185                         dev_err(&hdev->pdev->dev,
6186                                 "add mac addr failed for undefined, code=%d.\n",
6187                                 resp_code);
6188                 }
6189         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6190                 if (!resp_code) {
6191                         return_status = 0;
6192                 } else if (resp_code == 1) {
6193                         return_status = -ENOENT;
6194                         dev_dbg(&hdev->pdev->dev,
6195                                 "remove mac addr failed for miss.\n");
6196                 } else {
6197                         dev_err(&hdev->pdev->dev,
6198                                 "remove mac addr failed for undefined, code=%d.\n",
6199                                 resp_code);
6200                 }
6201         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6202                 if (!resp_code) {
6203                         return_status = 0;
6204                 } else if (resp_code == 1) {
6205                         return_status = -ENOENT;
6206                         dev_dbg(&hdev->pdev->dev,
6207                                 "lookup mac addr failed for miss.\n");
6208                 } else {
6209                         dev_err(&hdev->pdev->dev,
6210                                 "lookup mac addr failed for undefined, code=%d.\n",
6211                                 resp_code);
6212                 }
6213         } else {
6214                 return_status = -EINVAL;
6215                 dev_err(&hdev->pdev->dev,
6216                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6217                         op);
6218         }
6219
6220         return return_status;
6221 }
6222
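/* Set or clear a function's bit in the mac_vlan entry's VF bitmap. The
 * bitmap spans two descriptors: functions 0-191 live in desc[1].data and
 * functions 192-255 in desc[2].data, 32 bits per data word.
 */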
6223 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6224 {
6225         int word_num;
6226         int bit_num;
6227
6228         if (vfid > 255 || vfid < 0)
6229                 return -EIO;
6230
6231         if (vfid >= 0 && vfid <= 191) {
6232                 word_num = vfid / 32;
6233                 bit_num  = vfid % 32;
6234                 if (clr)
6235                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6236                 else
6237                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6238         } else {
6239                 word_num = (vfid - 192) / 32;
6240                 bit_num  = vfid % 32;
6241                 if (clr)
6242                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6243                 else
6244                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6245         }
6246
6247         return 0;
6248 }
6249
6250 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6251 {
6252 #define HCLGE_DESC_NUMBER 3
6253 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6254         int i, j;
6255
6256         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6257                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6258                         if (desc[i].data[j])
6259                                 return false;
6260
6261         return true;
6262 }
6263
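/* Pack a MAC address into the command layout: bytes 0-3 form the 32-bit
 * high word (byte 0 in the lowest bits) and bytes 4-5 form the 16-bit low
 * word. Multicast entries additionally set the entry type and mc enable
 * bits.
 */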
6264 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6265                                    const u8 *addr, bool is_mc)
6266 {
6267         const unsigned char *mac_addr = addr;
6268         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6269                        (mac_addr[0]) | (mac_addr[1] << 8);
6270         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6271
6272         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6273         if (is_mc) {
6274                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6275                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6276         }
6277
6278         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6279         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6280 }
6281
6282 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6283                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6284 {
6285         struct hclge_dev *hdev = vport->back;
6286         struct hclge_desc desc;
6287         u8 resp_code;
6288         u16 retval;
6289         int ret;
6290
6291         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6292
6293         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6294
6295         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6296         if (ret) {
6297                 dev_err(&hdev->pdev->dev,
6298                         "del mac addr failed for cmd_send, ret =%d.\n",
6299                         ret);
6300                 return ret;
6301         }
6302         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6303         retval = le16_to_cpu(desc.retval);
6304
6305         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6306                                              HCLGE_MAC_VLAN_REMOVE);
6307 }
6308
6309 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6310                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6311                                      struct hclge_desc *desc,
6312                                      bool is_mc)
6313 {
6314         struct hclge_dev *hdev = vport->back;
6315         u8 resp_code;
6316         u16 retval;
6317         int ret;
6318
6319         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6320         if (is_mc) {
6321                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6322                 memcpy(desc[0].data,
6323                        req,
6324                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6325                 hclge_cmd_setup_basic_desc(&desc[1],
6326                                            HCLGE_OPC_MAC_VLAN_ADD,
6327                                            true);
6328                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6329                 hclge_cmd_setup_basic_desc(&desc[2],
6330                                            HCLGE_OPC_MAC_VLAN_ADD,
6331                                            true);
6332                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6333         } else {
6334                 memcpy(desc[0].data,
6335                        req,
6336                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6337                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6338         }
6339         if (ret) {
6340                 dev_err(&hdev->pdev->dev,
6341                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6342                         ret);
6343                 return ret;
6344         }
6345         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6346         retval = le16_to_cpu(desc[0].retval);
6347
6348         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6349                                              HCLGE_MAC_VLAN_LKUP);
6350 }
6351
6352 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6353                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6354                                   struct hclge_desc *mc_desc)
6355 {
6356         struct hclge_dev *hdev = vport->back;
6357         int cfg_status;
6358         u8 resp_code;
6359         u16 retval;
6360         int ret;
6361
6362         if (!mc_desc) {
6363                 struct hclge_desc desc;
6364
6365                 hclge_cmd_setup_basic_desc(&desc,
6366                                            HCLGE_OPC_MAC_VLAN_ADD,
6367                                            false);
6368                 memcpy(desc.data, req,
6369                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6370                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6371                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6372                 retval = le16_to_cpu(desc.retval);
6373
6374                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6375                                                            resp_code,
6376                                                            HCLGE_MAC_VLAN_ADD);
6377         } else {
6378                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6379                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6380                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6381                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6382                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6383                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6384                 memcpy(mc_desc[0].data, req,
6385                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6386                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6387                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6388                 retval = le16_to_cpu(mc_desc[0].retval);
6389
6390                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6391                                                            resp_code,
6392                                                            HCLGE_MAC_VLAN_ADD);
6393         }
6394
6395         if (ret) {
6396                 dev_err(&hdev->pdev->dev,
6397                         "add mac addr failed for cmd_send, ret =%d.\n",
6398                         ret);
6399                 return ret;
6400         }
6401
6402         return cfg_status;
6403 }
6404
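/* Request unicast MAC (UMV) table space from firmware. The allocated space
 * is divided by (num_req_vfs + 2) to form each function's private quota;
 * the shared pool gets one quota plus the remainder of that division.
 */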
6405 static int hclge_init_umv_space(struct hclge_dev *hdev)
6406 {
6407         u16 allocated_size = 0;
6408         int ret;
6409
6410         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6411                                   true);
6412         if (ret)
6413                 return ret;
6414
6415         if (allocated_size < hdev->wanted_umv_size)
6416                 dev_warn(&hdev->pdev->dev,
6417                          "Alloc umv space failed, want %d, get %d\n",
6418                          hdev->wanted_umv_size, allocated_size);
6419
6420         mutex_init(&hdev->umv_mutex);
6421         hdev->max_umv_size = allocated_size;
6422         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6423         hdev->share_umv_size = hdev->priv_umv_size +
6424                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6425
6426         return 0;
6427 }
6428
6429 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6430 {
6431         int ret;
6432
6433         if (hdev->max_umv_size > 0) {
6434                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6435                                           false);
6436                 if (ret)
6437                         return ret;
6438                 hdev->max_umv_size = 0;
6439         }
6440         mutex_destroy(&hdev->umv_mutex);
6441
6442         return 0;
6443 }
6444
6445 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6446                                u16 *allocated_size, bool is_alloc)
6447 {
6448         struct hclge_umv_spc_alc_cmd *req;
6449         struct hclge_desc desc;
6450         int ret;
6451
6452         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6453         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6454         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6455         req->space_size = cpu_to_le32(space_size);
6456
6457         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6458         if (ret) {
6459                 dev_err(&hdev->pdev->dev,
6460                         "%s umv space failed for cmd_send, ret =%d\n",
6461                         is_alloc ? "allocate" : "free", ret);
6462                 return ret;
6463         }
6464
6465         if (is_alloc && allocated_size)
6466                 *allocated_size = le32_to_cpu(desc.data[1]);
6467
6468         return 0;
6469 }
6470
6471 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6472 {
6473         struct hclge_vport *vport;
6474         int i;
6475
6476         for (i = 0; i < hdev->num_alloc_vport; i++) {
6477                 vport = &hdev->vport[i];
6478                 vport->used_umv_num = 0;
6479         }
6480
6481         mutex_lock(&hdev->umv_mutex);
6482         hdev->share_umv_size = hdev->priv_umv_size +
6483                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6484         mutex_unlock(&hdev->umv_mutex);
6485 }
6486
6487 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6488 {
6489         struct hclge_dev *hdev = vport->back;
6490         bool is_full;
6491
6492         mutex_lock(&hdev->umv_mutex);
6493         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6494                    hdev->share_umv_size == 0);
6495         mutex_unlock(&hdev->umv_mutex);
6496
6497         return is_full;
6498 }
6499
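/* Account a unicast MAC add or delete: addresses are charged against the
 * vport's private UMV quota first and spill into the shared pool once the
 * private quota is exhausted; freeing reverses the accounting.
 */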
6500 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6501 {
6502         struct hclge_dev *hdev = vport->back;
6503
6504         mutex_lock(&hdev->umv_mutex);
6505         if (is_free) {
6506                 if (vport->used_umv_num > hdev->priv_umv_size)
6507                         hdev->share_umv_size++;
6508
6509                 if (vport->used_umv_num > 0)
6510                         vport->used_umv_num--;
6511         } else {
6512                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6513                     hdev->share_umv_size > 0)
6514                         hdev->share_umv_size--;
6515                 vport->used_umv_num++;
6516         }
6517         mutex_unlock(&hdev->umv_mutex);
6518 }
6519
6520 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6521                              const unsigned char *addr)
6522 {
6523         struct hclge_vport *vport = hclge_get_vport(handle);
6524
6525         return hclge_add_uc_addr_common(vport, addr);
6526 }
6527
6528 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6529                              const unsigned char *addr)
6530 {
6531         struct hclge_dev *hdev = vport->back;
6532         struct hclge_mac_vlan_tbl_entry_cmd req;
6533         struct hclge_desc desc;
6534         u16 egress_port = 0;
6535         int ret;
6536
6537         /* mac addr check */
6538         if (is_zero_ether_addr(addr) ||
6539             is_broadcast_ether_addr(addr) ||
6540             is_multicast_ether_addr(addr)) {
6541                 dev_err(&hdev->pdev->dev,
6542                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6543                          addr,
6544                          is_zero_ether_addr(addr),
6545                          is_broadcast_ether_addr(addr),
6546                          is_multicast_ether_addr(addr));
6547                 return -EINVAL;
6548         }
6549
6550         memset(&req, 0, sizeof(req));
6551
6552         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6553                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6554
6555         req.egress_port = cpu_to_le16(egress_port);
6556
6557         hclge_prepare_mac_addr(&req, addr, false);
6558
6559         /* Look up the mac address in the mac_vlan table, and add
6560          * it if the entry does not exist. Duplicate unicast entries
6561          * are not allowed in the mac vlan table.
6562          */
6563         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6564         if (ret == -ENOENT) {
6565                 if (!hclge_is_umv_space_full(vport)) {
6566                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6567                         if (!ret)
6568                                 hclge_update_umv_space(vport, false);
6569                         return ret;
6570                 }
6571
6572                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6573                         hdev->priv_umv_size);
6574
6575                 return -ENOSPC;
6576         }
6577
6578         /* check if we just hit the duplicate */
6579         if (!ret) {
6580                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6581                          vport->vport_id, addr);
6582                 return 0;
6583         }
6584
6585         dev_err(&hdev->pdev->dev,
6586                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6587                 addr);
6588
6589         return ret;
6590 }
6591
6592 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6593                             const unsigned char *addr)
6594 {
6595         struct hclge_vport *vport = hclge_get_vport(handle);
6596
6597         return hclge_rm_uc_addr_common(vport, addr);
6598 }
6599
6600 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6601                             const unsigned char *addr)
6602 {
6603         struct hclge_dev *hdev = vport->back;
6604         struct hclge_mac_vlan_tbl_entry_cmd req;
6605         int ret;
6606
6607         /* mac addr check */
6608         if (is_zero_ether_addr(addr) ||
6609             is_broadcast_ether_addr(addr) ||
6610             is_multicast_ether_addr(addr)) {
6611                 dev_dbg(&hdev->pdev->dev,
6612                         "Remove mac err! invalid mac:%pM.\n",
6613                          addr);
6614                 return -EINVAL;
6615         }
6616
6617         memset(&req, 0, sizeof(req));
6618         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6619         hclge_prepare_mac_addr(&req, addr, false);
6620         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6621         if (!ret)
6622                 hclge_update_umv_space(vport, true);
6623
6624         return ret;
6625 }
6626
6627 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6628                              const unsigned char *addr)
6629 {
6630         struct hclge_vport *vport = hclge_get_vport(handle);
6631
6632         return hclge_add_mc_addr_common(vport, addr);
6633 }
6634
6635 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6636                              const unsigned char *addr)
6637 {
6638         struct hclge_dev *hdev = vport->back;
6639         struct hclge_mac_vlan_tbl_entry_cmd req;
6640         struct hclge_desc desc[3];
6641         int status;
6642
6643         /* mac addr check */
6644         if (!is_multicast_ether_addr(addr)) {
6645                 dev_err(&hdev->pdev->dev,
6646                         "Add mc mac err! invalid mac:%pM.\n",
6647                          addr);
6648                 return -EINVAL;
6649         }
6650         memset(&req, 0, sizeof(req));
6651         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6652         hclge_prepare_mac_addr(&req, addr, true);
6653         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6654         if (!status) {
6655                 /* This mac addr exists, update the VFID for it */
6656                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6657                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6658         } else {
6659                 /* This mac addr does not exist, add a new entry for it */
6660                 memset(desc[0].data, 0, sizeof(desc[0].data));
6661                 memset(desc[1].data, 0, sizeof(desc[1].data));
6662                 memset(desc[2].data, 0, sizeof(desc[2].data));
6663                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6664                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6665         }
6666
6667         if (status == -ENOSPC)
6668                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6669
6670         return status;
6671 }
6672
6673 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6674                             const unsigned char *addr)
6675 {
6676         struct hclge_vport *vport = hclge_get_vport(handle);
6677
6678         return hclge_rm_mc_addr_common(vport, addr);
6679 }
6680
6681 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6682                             const unsigned char *addr)
6683 {
6684         struct hclge_dev *hdev = vport->back;
6685         struct hclge_mac_vlan_tbl_entry_cmd req;
6686         enum hclge_cmd_status status;
6687         struct hclge_desc desc[3];
6688
6689         /* mac addr check */
6690         if (!is_multicast_ether_addr(addr)) {
6691                 dev_dbg(&hdev->pdev->dev,
6692                         "Remove mc mac err! invalid mac:%pM.\n",
6693                          addr);
6694                 return -EINVAL;
6695         }
6696
6697         memset(&req, 0, sizeof(req));
6698         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6699         hclge_prepare_mac_addr(&req, addr, true);
6700         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6701         if (!status) {
6702                 /* This mac addr exists, remove this handle's VFID for it */
6703                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6704
6705                 if (hclge_is_all_function_id_zero(desc))
6706                         /* All the vfids are zero, so this entry needs to be deleted */
6707                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6708                 else
6709                         /* Not all the vfids are zero, update the vfid */
6710                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6711
6712         } else {
6713                 /* This mac address may be in the mta table, but it cannot be
6714                  * deleted here because an mta entry represents an address
6715                  * range rather than a specific address. The delete action for
6716                  * all entries will take effect in update_mta_status, called by
6717                  * hns3_nic_set_rx_mode.
6718                  */
6719                 status = 0;
6720         }
6721
6722         return status;
6723 }
6724
6725 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6726                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6727 {
6728         struct hclge_vport_mac_addr_cfg *mac_cfg;
6729         struct list_head *list;
6730
6731         if (!vport->vport_id)
6732                 return;
6733
6734         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6735         if (!mac_cfg)
6736                 return;
6737
6738         mac_cfg->hd_tbl_status = true;
6739         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6740
6741         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6742                &vport->uc_mac_list : &vport->mc_mac_list;
6743
6744         list_add_tail(&mac_cfg->node, list);
6745 }
6746
6747 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6748                               bool is_write_tbl,
6749                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6750 {
6751         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6752         struct list_head *list;
6753         bool uc_flag, mc_flag;
6754
6755         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6756                &vport->uc_mac_list : &vport->mc_mac_list;
6757
6758         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6759         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6760
6761         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6762                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6763                         if (uc_flag && mac_cfg->hd_tbl_status)
6764                                 hclge_rm_uc_addr_common(vport, mac_addr);
6765
6766                         if (mc_flag && mac_cfg->hd_tbl_status)
6767                                 hclge_rm_mc_addr_common(vport, mac_addr);
6768
6769                         list_del(&mac_cfg->node);
6770                         kfree(mac_cfg);
6771                         break;
6772                 }
6773         }
6774 }
6775
6776 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6777                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6778 {
6779         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6780         struct list_head *list;
6781
6782         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6783                &vport->uc_mac_list : &vport->mc_mac_list;
6784
6785         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6786                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6787                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6788
6789                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6790                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6791
6792                 mac_cfg->hd_tbl_status = false;
6793                 if (is_del_list) {
6794                         list_del(&mac_cfg->node);
6795                         kfree(mac_cfg);
6796                 }
6797         }
6798 }
6799
6800 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6801 {
6802         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6803         struct hclge_vport *vport;
6804         int i;
6805
6806         mutex_lock(&hdev->vport_cfg_mutex);
6807         for (i = 0; i < hdev->num_alloc_vport; i++) {
6808                 vport = &hdev->vport[i];
6809                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6810                         list_del(&mac->node);
6811                         kfree(mac);
6812                 }
6813
6814                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6815                         list_del(&mac->node);
6816                         kfree(mac);
6817                 }
6818         }
6819         mutex_unlock(&hdev->vport_cfg_mutex);
6820 }
6821
6822 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6823                                               u16 cmdq_resp, u8 resp_code)
6824 {
6825 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6826 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6827 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6828 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6829
6830         int return_status;
6831
6832         if (cmdq_resp) {
6833                 dev_err(&hdev->pdev->dev,
6834                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6835                         cmdq_resp);
6836                 return -EIO;
6837         }
6838
6839         switch (resp_code) {
6840         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6841         case HCLGE_ETHERTYPE_ALREADY_ADD:
6842                 return_status = 0;
6843                 break;
6844         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6845                 dev_err(&hdev->pdev->dev,
6846                         "add mac ethertype failed for manager table overflow.\n");
6847                 return_status = -EIO;
6848                 break;
6849         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6850                 dev_err(&hdev->pdev->dev,
6851                         "add mac ethertype failed for key conflict.\n");
6852                 return_status = -EIO;
6853                 break;
6854         default:
6855                 dev_err(&hdev->pdev->dev,
6856                         "add mac ethertype failed for undefined, code=%d.\n",
6857                         resp_code);
6858                 return_status = -EIO;
6859         }
6860
6861         return return_status;
6862 }
6863
6864 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6865                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6866 {
6867         struct hclge_desc desc;
6868         u8 resp_code;
6869         u16 retval;
6870         int ret;
6871
6872         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6873         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6874
6875         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6876         if (ret) {
6877                 dev_err(&hdev->pdev->dev,
6878                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6879                         ret);
6880                 return ret;
6881         }
6882
6883         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6884         retval = le16_to_cpu(desc.retval);
6885
6886         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6887 }
6888
6889 static int init_mgr_tbl(struct hclge_dev *hdev)
6890 {
6891         int ret;
6892         int i;
6893
6894         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6895                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6896                 if (ret) {
6897                         dev_err(&hdev->pdev->dev,
6898                                 "add mac ethertype failed, ret =%d.\n",
6899                                 ret);
6900                         return ret;
6901                 }
6902         }
6903
6904         return 0;
6905 }
6906
6907 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6908 {
6909         struct hclge_vport *vport = hclge_get_vport(handle);
6910         struct hclge_dev *hdev = vport->back;
6911
6912         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6913 }
6914
6915 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6916                               bool is_first)
6917 {
6918         const unsigned char *new_addr = (const unsigned char *)p;
6919         struct hclge_vport *vport = hclge_get_vport(handle);
6920         struct hclge_dev *hdev = vport->back;
6921         int ret;
6922
6923         /* mac addr check */
6924         if (is_zero_ether_addr(new_addr) ||
6925             is_broadcast_ether_addr(new_addr) ||
6926             is_multicast_ether_addr(new_addr)) {
6927                 dev_err(&hdev->pdev->dev,
6928                         "Change uc mac err! invalid mac:%pM.\n",
6929                          new_addr);
6930                 return -EINVAL;
6931         }
6932
6933         if ((!is_first || is_kdump_kernel()) &&
6934             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6935                 dev_warn(&hdev->pdev->dev,
6936                          "remove old uc mac address fail.\n");
6937
6938         ret = hclge_add_uc_addr(handle, new_addr);
6939         if (ret) {
6940                 dev_err(&hdev->pdev->dev,
6941                         "add uc mac address fail, ret =%d.\n",
6942                         ret);
6943
6944                 if (!is_first &&
6945                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6946                         dev_err(&hdev->pdev->dev,
6947                                 "restore uc mac address fail.\n");
6948
6949                 return -EIO;
6950         }
6951
6952         ret = hclge_pause_addr_cfg(hdev, new_addr);
6953         if (ret) {
6954                 dev_err(&hdev->pdev->dev,
6955                         "configure mac pause address fail, ret =%d.\n",
6956                         ret);
6957                 return -EIO;
6958         }
6959
6960         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6961
6962         return 0;
6963 }
6964
6965 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6966                           int cmd)
6967 {
6968         struct hclge_vport *vport = hclge_get_vport(handle);
6969         struct hclge_dev *hdev = vport->back;
6970
6971         if (!hdev->hw.mac.phydev)
6972                 return -EOPNOTSUPP;
6973
6974         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6975 }
6976
6977 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6978                                       u8 fe_type, bool filter_en, u8 vf_id)
6979 {
6980         struct hclge_vlan_filter_ctrl_cmd *req;
6981         struct hclge_desc desc;
6982         int ret;
6983
6984         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6985
6986         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6987         req->vlan_type = vlan_type;
6988         req->vlan_fe = filter_en ? fe_type : 0;
6989         req->vf_id = vf_id;
6990
6991         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6992         if (ret)
6993                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6994                         ret);
6995
6996         return ret;
6997 }
6998
6999 #define HCLGE_FILTER_TYPE_VF            0
7000 #define HCLGE_FILTER_TYPE_PORT          1
7001 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7002 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7003 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7004 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7005 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7006 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7007                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7008 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7009                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7010
7011 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7012 {
7013         struct hclge_vport *vport = hclge_get_vport(handle);
7014         struct hclge_dev *hdev = vport->back;
7015
7016         if (hdev->pdev->revision >= 0x21) {
7017                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7018                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7019                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7020                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7021         } else {
7022                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7023                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7024                                            0);
7025         }
7026         if (enable)
7027                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7028         else
7029                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7030 }
7031
7032 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7033                                     bool is_kill, u16 vlan, u8 qos,
7034                                     __be16 proto)
7035 {
7036 #define HCLGE_MAX_VF_BYTES  16
7037         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7038         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7039         struct hclge_desc desc[2];
7040         u8 vf_byte_val;
7041         u8 vf_byte_off;
7042         int ret;
7043
7044         hclge_cmd_setup_basic_desc(&desc[0],
7045                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7046         hclge_cmd_setup_basic_desc(&desc[1],
7047                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7048
7049         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7050
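             /* each descriptor carries a HCLGE_MAX_VF_BYTES wide VF bitmap,
              * so the target VF's bit lands in desc[0] for the first 128 VFs
              * and spills into desc[1] for higher vfids
              */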
7051         vf_byte_off = vfid / 8;
7052         vf_byte_val = 1 << (vfid % 8);
7053
7054         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7055         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7056
7057         req0->vlan_id  = cpu_to_le16(vlan);
7058         req0->vlan_cfg = is_kill;
7059
7060         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7061                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7062         else
7063                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7064
7065         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7066         if (ret) {
7067                 dev_err(&hdev->pdev->dev,
7068                         "Send vf vlan command fail, ret =%d.\n",
7069                         ret);
7070                 return ret;
7071         }
7072
7073         if (!is_kill) {
7074 #define HCLGE_VF_VLAN_NO_ENTRY  2
7075                 if (!req0->resp_code || req0->resp_code == 1)
7076                         return 0;
7077
7078                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7079                         dev_warn(&hdev->pdev->dev,
7080                                  "vf vlan table is full, vf vlan filter is disabled\n");
7081                         return 0;
7082                 }
7083
7084                 dev_err(&hdev->pdev->dev,
7085                         "Add vf vlan filter fail, ret =%d.\n",
7086                         req0->resp_code);
7087         } else {
7088 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7089                 if (!req0->resp_code)
7090                         return 0;
7091
7092                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7093                         dev_warn(&hdev->pdev->dev,
7094                                  "vlan %d filter is not in vf vlan table\n",
7095                                  vlan);
7096                         return 0;
7097                 }
7098
7099                 dev_err(&hdev->pdev->dev,
7100                         "Kill vf vlan filter fail, ret =%d.\n",
7101                         req0->resp_code);
7102         }
7103
7104         return -EIO;
7105 }
7106
7107 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7108                                       u16 vlan_id, bool is_kill)
7109 {
7110         struct hclge_vlan_filter_pf_cfg_cmd *req;
7111         struct hclge_desc desc;
7112         u8 vlan_offset_byte_val;
7113         u8 vlan_offset_byte;
7114         u8 vlan_offset_160;
7115         int ret;
7116
7117         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7118
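             /* each command covers a block of 160 VLAN ids: vlan_offset
              * selects the block, and the bitmap byte/bit select the id
              * within that block
              */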
7119         vlan_offset_160 = vlan_id / 160;
7120         vlan_offset_byte = (vlan_id % 160) / 8;
7121         vlan_offset_byte_val = 1 << (vlan_id % 8);
7122
7123         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7124         req->vlan_offset = vlan_offset_160;
7125         req->vlan_cfg = is_kill;
7126         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7127
7128         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7129         if (ret)
7130                 dev_err(&hdev->pdev->dev,
7131                         "port vlan command, send fail, ret =%d.\n", ret);
7132         return ret;
7133 }
7134
7135 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7136                                     u16 vport_id, u16 vlan_id, u8 qos,
7137                                     bool is_kill)
7138 {
7139         u16 vport_idx, vport_num = 0;
7140         int ret;
7141
7142         if (is_kill && !vlan_id)
7143                 return 0;
7144
7145         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7146                                        0, proto);
7147         if (ret) {
7148                 dev_err(&hdev->pdev->dev,
7149                         "Set %d vport vlan filter config fail, ret =%d.\n",
7150                         vport_id, ret);
7151                 return ret;
7152         }
7153
7154         /* vlan 0 may be added twice when 8021q module is enabled */
7155         if (!is_kill && !vlan_id &&
7156             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7157                 return 0;
7158
7159         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7160                 dev_err(&hdev->pdev->dev,
7161                         "Add port vlan failed, vport %d is already in vlan %d\n",
7162                         vport_id, vlan_id);
7163                 return -EINVAL;
7164         }
7165
7166         if (is_kill &&
7167             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7168                 dev_err(&hdev->pdev->dev,
7169                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7170                         vport_id, vlan_id);
7171                 return -EINVAL;
7172         }
7173
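             /* only program the shared port VLAN filter when the first vport
              * joins this vlan or the last vport leaves it
              */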
7174         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7175                 vport_num++;
7176
7177         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7178                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7179                                                  is_kill);
7180
7181         return ret;
7182 }
7183
7184 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7185 {
7186         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7187         struct hclge_vport_vtag_tx_cfg_cmd *req;
7188         struct hclge_dev *hdev = vport->back;
7189         struct hclge_desc desc;
7190         int status;
7191
7192         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7193
7194         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7195         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7196         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7197         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7198                       vcfg->accept_tag1 ? 1 : 0);
7199         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7200                       vcfg->accept_untag1 ? 1 : 0);
7201         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7202                       vcfg->accept_tag2 ? 1 : 0);
7203         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7204                       vcfg->accept_untag2 ? 1 : 0);
7205         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7206                       vcfg->insert_tag1_en ? 1 : 0);
7207         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7208                       vcfg->insert_tag2_en ? 1 : 0);
7209         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7210
7211         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7212         req->vf_bitmap[req->vf_offset] =
7213                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7214
7215         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7216         if (status)
7217                 dev_err(&hdev->pdev->dev,
7218                         "Send port txvlan cfg command fail, ret =%d\n",
7219                         status);
7220
7221         return status;
7222 }
7223
7224 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7225 {
7226         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7227         struct hclge_vport_vtag_rx_cfg_cmd *req;
7228         struct hclge_dev *hdev = vport->back;
7229         struct hclge_desc desc;
7230         int status;
7231
7232         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7233
7234         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7235         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7236                       vcfg->strip_tag1_en ? 1 : 0);
7237         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7238                       vcfg->strip_tag2_en ? 1 : 0);
7239         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7240                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7241         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7242                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7243
7244         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7245         req->vf_bitmap[req->vf_offset] =
7246                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7247
7248         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7249         if (status)
7250                 dev_err(&hdev->pdev->dev,
7251                         "Send port rxvlan cfg command fail, ret =%d\n",
7252                         status);
7253
7254         return status;
7255 }
7256
7257 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7258                                   u16 port_base_vlan_state,
7259                                   u16 vlan_tag)
7260 {
7261         int ret;
7262
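             /* when no port based VLAN is configured, tag1 from the packet is
              * accepted as-is; when one is configured, the port VLAN is
              * inserted as tag1 on transmit instead
              */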
7263         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7264                 vport->txvlan_cfg.accept_tag1 = true;
7265                 vport->txvlan_cfg.insert_tag1_en = false;
7266                 vport->txvlan_cfg.default_tag1 = 0;
7267         } else {
7268                 vport->txvlan_cfg.accept_tag1 = false;
7269                 vport->txvlan_cfg.insert_tag1_en = true;
7270                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7271         }
7272
7273         vport->txvlan_cfg.accept_untag1 = true;
7274
7275         /* accept_tag2 and accept_untag2 are not supported on
7276          * pdev revision(0x20); newer revisions support them, but
7277          * these two fields cannot be configured by the user.
7278          */
7279         vport->txvlan_cfg.accept_tag2 = true;
7280         vport->txvlan_cfg.accept_untag2 = true;
7281         vport->txvlan_cfg.insert_tag2_en = false;
7282         vport->txvlan_cfg.default_tag2 = 0;
7283
7284         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7285                 vport->rxvlan_cfg.strip_tag1_en = false;
7286                 vport->rxvlan_cfg.strip_tag2_en =
7287                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7288         } else {
7289                 vport->rxvlan_cfg.strip_tag1_en =
7290                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7291                 vport->rxvlan_cfg.strip_tag2_en = true;
7292         }
7293         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7294         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7295
7296         ret = hclge_set_vlan_tx_offload_cfg(vport);
7297         if (ret)
7298                 return ret;
7299
7300         return hclge_set_vlan_rx_offload_cfg(vport);
7301 }
7302
7303 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7304 {
7305         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7306         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7307         struct hclge_desc desc;
7308         int status;
7309
7310         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7311         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7312         rx_req->ot_fst_vlan_type =
7313                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7314         rx_req->ot_sec_vlan_type =
7315                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7316         rx_req->in_fst_vlan_type =
7317                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7318         rx_req->in_sec_vlan_type =
7319                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7320
7321         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7322         if (status) {
7323                 dev_err(&hdev->pdev->dev,
7324                         "Send rxvlan protocol type command fail, ret =%d\n",
7325                         status);
7326                 return status;
7327         }
7328
7329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7330
7331         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7332         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7333         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7334
7335         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7336         if (status)
7337                 dev_err(&hdev->pdev->dev,
7338                         "Send txvlan protocol type command fail, ret =%d\n",
7339                         status);
7340
7341         return status;
7342 }
7343
7344 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7345 {
7346 #define HCLGE_DEF_VLAN_TYPE             0x8100
7347
7348         struct hnae3_handle *handle = &hdev->vport[0].nic;
7349         struct hclge_vport *vport;
7350         int ret;
7351         int i;
7352
7353         if (hdev->pdev->revision >= 0x21) {
7354                 /* for revision 0x21, vf vlan filter is per function */
7355                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7356                         vport = &hdev->vport[i];
7357                         ret = hclge_set_vlan_filter_ctrl(hdev,
7358                                                          HCLGE_FILTER_TYPE_VF,
7359                                                          HCLGE_FILTER_FE_EGRESS,
7360                                                          true,
7361                                                          vport->vport_id);
7362                         if (ret)
7363                                 return ret;
7364                 }
7365
7366                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7367                                                  HCLGE_FILTER_FE_INGRESS, true,
7368                                                  0);
7369                 if (ret)
7370                         return ret;
7371         } else {
7372                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7373                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7374                                                  true, 0);
7375                 if (ret)
7376                         return ret;
7377         }
7378
7379         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7380
7381         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7382         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7383         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7384         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7385         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7386         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7387
7388         ret = hclge_set_vlan_protocol_type(hdev);
7389         if (ret)
7390                 return ret;
7391
7392         for (i = 0; i < hdev->num_alloc_vport; i++) {
7393                 u16 vlan_tag;
7394
7395                 vport = &hdev->vport[i];
7396                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7397
7398                 ret = hclge_vlan_offload_cfg(vport,
7399                                              vport->port_base_vlan_cfg.state,
7400                                              vlan_tag);
7401                 if (ret)
7402                         return ret;
7403         }
7404
7405         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7406 }
7407
7408 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7409                                        bool writen_to_tbl)
7410 {
7411         struct hclge_vport_vlan_cfg *vlan;
7412
7413         /* vlan 0 is reserved */
7414         if (!vlan_id)
7415                 return;
7416
7417         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7418         if (!vlan)
7419                 return;
7420
7421         vlan->hd_tbl_status = writen_to_tbl;
7422         vlan->vlan_id = vlan_id;
7423
7424         list_add_tail(&vlan->node, &vport->vlan_list);
7425 }
7426
7427 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7428 {
7429         struct hclge_vport_vlan_cfg *vlan, *tmp;
7430         struct hclge_dev *hdev = vport->back;
7431         int ret;
7432
7433         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7434                 if (!vlan->hd_tbl_status) {
7435                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7436                                                        vport->vport_id,
7437                                                        vlan->vlan_id, 0, false);
7438                         if (ret) {
7439                                 dev_err(&hdev->pdev->dev,
7440                                         "restore vport vlan list failed, ret=%d\n",
7441                                         ret);
7442                                 return ret;
7443                         }
7444                 }
7445                 vlan->hd_tbl_status = true;
7446         }
7447
7448         return 0;
7449 }
7450
7451 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7452                                       bool is_write_tbl)
7453 {
7454         struct hclge_vport_vlan_cfg *vlan, *tmp;
7455         struct hclge_dev *hdev = vport->back;
7456
7457         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7458                 if (vlan->vlan_id == vlan_id) {
7459                         if (is_write_tbl && vlan->hd_tbl_status)
7460                                 hclge_set_vlan_filter_hw(hdev,
7461                                                          htons(ETH_P_8021Q),
7462                                                          vport->vport_id,
7463                                                          vlan_id, 0,
7464                                                          true);
7465
7466                         list_del(&vlan->node);
7467                         kfree(vlan);
7468                         break;
7469                 }
7470         }
7471 }
7472
7473 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7474 {
7475         struct hclge_vport_vlan_cfg *vlan, *tmp;
7476         struct hclge_dev *hdev = vport->back;
7477
7478         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7479                 if (vlan->hd_tbl_status)
7480                         hclge_set_vlan_filter_hw(hdev,
7481                                                  htons(ETH_P_8021Q),
7482                                                  vport->vport_id,
7483                                                  vlan->vlan_id, 0,
7484                                                  true);
7485
7486                 vlan->hd_tbl_status = false;
7487                 if (is_del_list) {
7488                         list_del(&vlan->node);
7489                         kfree(vlan);
7490                 }
7491         }
7492 }
7493
7494 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7495 {
7496         struct hclge_vport_vlan_cfg *vlan, *tmp;
7497         struct hclge_vport *vport;
7498         int i;
7499
7500         mutex_lock(&hdev->vport_cfg_mutex);
7501         for (i = 0; i < hdev->num_alloc_vport; i++) {
7502                 vport = &hdev->vport[i];
7503                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7504                         list_del(&vlan->node);
7505                         kfree(vlan);
7506                 }
7507         }
7508         mutex_unlock(&hdev->vport_cfg_mutex);
7509 }
7510
7511 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7512 {
7513         struct hclge_vport *vport = hclge_get_vport(handle);
7514
7515         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7516                 vport->rxvlan_cfg.strip_tag1_en = false;
7517                 vport->rxvlan_cfg.strip_tag2_en = enable;
7518         } else {
7519                 vport->rxvlan_cfg.strip_tag1_en = enable;
7520                 vport->rxvlan_cfg.strip_tag2_en = true;
7521         }
7522         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7523         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7524         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7525
7526         return hclge_set_vlan_rx_offload_cfg(vport);
7527 }
7528
7529 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7530                                             u16 port_base_vlan_state,
7531                                             struct hclge_vlan_info *new_info,
7532                                             struct hclge_vlan_info *old_info)
7533 {
7534         struct hclge_dev *hdev = vport->back;
7535         int ret;
7536
7537         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7538                 hclge_rm_vport_all_vlan_table(vport, false);
7539                 return hclge_set_vlan_filter_hw(hdev,
7540                                                  htons(new_info->vlan_proto),
7541                                                  vport->vport_id,
7542                                                  new_info->vlan_tag,
7543                                                  new_info->qos, false);
7544         }
7545
7546         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7547                                        vport->vport_id, old_info->vlan_tag,
7548                                        old_info->qos, true);
7549         if (ret)
7550                 return ret;
7551
7552         return hclge_add_vport_all_vlan_table(vport);
7553 }
7554
7555 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7556                                     struct hclge_vlan_info *vlan_info)
7557 {
7558         struct hnae3_handle *nic = &vport->nic;
7559         struct hclge_vlan_info *old_vlan_info;
7560         struct hclge_dev *hdev = vport->back;
7561         int ret;
7562
7563         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7564
7565         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7566         if (ret)
7567                 return ret;
7568
7569         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7570                 /* add new VLAN tag */
7571                 ret = hclge_set_vlan_filter_hw(hdev,
7572                                                htons(vlan_info->vlan_proto),
7573                                                vport->vport_id,
7574                                                vlan_info->vlan_tag,
7575                                                vlan_info->qos, false);
7576                 if (ret)
7577                         return ret;
7578
7579                 /* remove old VLAN tag */
7580                 ret = hclge_set_vlan_filter_hw(hdev,
7581                                                htons(old_vlan_info->vlan_proto),
7582                                                vport->vport_id,
7583                                                old_vlan_info->vlan_tag,
7584                                                old_vlan_info->qos, true);
7585                 if (ret)
7586                         return ret;
7587
7588                 goto update;
7589         }
7590
7591         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7592                                                old_vlan_info);
7593         if (ret)
7594                 return ret;
7595
7596         /* update state only when disabling/enabling port based VLAN */
7597         vport->port_base_vlan_cfg.state = state;
7598         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7599                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7600         else
7601                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7602
7603 update:
7604         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7605         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7606         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7607
7608         return 0;
7609 }
7610
7611 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7612                                           enum hnae3_port_base_vlan_state state,
7613                                           u16 vlan)
7614 {
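        /* requesting VLAN 0 disables the port based VLAN (or is a no-op if it
         * is already disabled), a different non-zero tag enables or modifies
         * it, and requesting the current tag leaves it unchanged
         */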
7615         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7616                 if (!vlan)
7617                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7618                 else
7619                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7620         } else {
7621                 if (!vlan)
7622                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7623                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7624                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7625                 else
7626                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7627         }
7628 }
7629
7630 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7631                                     u16 vlan, u8 qos, __be16 proto)
7632 {
7633         struct hclge_vport *vport = hclge_get_vport(handle);
7634         struct hclge_dev *hdev = vport->back;
7635         struct hclge_vlan_info vlan_info;
7636         u16 state;
7637         int ret;
7638
7639         if (hdev->pdev->revision == 0x20)
7640                 return -EOPNOTSUPP;
7641
7642         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7643         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7644                 return -EINVAL;
7645         if (proto != htons(ETH_P_8021Q))
7646                 return -EPROTONOSUPPORT;
7647
7648         vport = &hdev->vport[vfid];
7649         state = hclge_get_port_base_vlan_state(vport,
7650                                                vport->port_base_vlan_cfg.state,
7651                                                vlan);
7652         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7653                 return 0;
7654
7655         vlan_info.vlan_tag = vlan;
7656         vlan_info.qos = qos;
7657         vlan_info.vlan_proto = ntohs(proto);
7658
7659         /* update port based VLAN for PF */
7660         if (!vfid) {
7661                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7662                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7663                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7664
7665                 return ret;
7666         }
7667
7668         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7669                 return hclge_update_port_base_vlan_cfg(vport, state,
7670                                                        &vlan_info);
7671         } else {
7672                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7673                                                         (u8)vfid, state,
7674                                                         vlan, qos,
7675                                                         ntohs(proto));
7676                 return ret;
7677         }
7678 }
7679
7680 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7681                           u16 vlan_id, bool is_kill)
7682 {
7683         struct hclge_vport *vport = hclge_get_vport(handle);
7684         struct hclge_dev *hdev = vport->back;
7685         bool writen_to_tbl = false;
7686         int ret = 0;
7687
7688         /* when port based VLAN is enabled, it is used as the VLAN filter
7689          * entry. In this case we don't update the VLAN filter table when
7690          * the user adds or removes a VLAN, just the vport VLAN list. The
7691          * VLAN ids in that list won't be written to the VLAN filter
7692          * table until port based VLAN is disabled.
7693          */
7694         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7695                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7696                                                vlan_id, 0, is_kill);
7697                 writen_to_tbl = true;
7698         }
7699
7700         if (ret)
7701                 return ret;
7702
7703         if (is_kill)
7704                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7705         else
7706                 hclge_add_vport_vlan_table(vport, vlan_id,
7707                                            writen_to_tbl);
7708
7709         return 0;
7710 }
7711
7712 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7713 {
7714         struct hclge_config_max_frm_size_cmd *req;
7715         struct hclge_desc desc;
7716
7717         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7718
7719         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7720         req->max_frm_size = cpu_to_le16(new_mps);
7721         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7722
7723         return hclge_cmd_send(&hdev->hw, &desc, 1);
7724 }
7725
7726 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7727 {
7728         struct hclge_vport *vport = hclge_get_vport(handle);
7729
7730         return hclge_set_vport_mtu(vport, new_mtu);
7731 }
7732
7733 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7734 {
7735         struct hclge_dev *hdev = vport->back;
7736         int i, max_frm_size, ret = 0;
7737
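             /* convert the MTU to a max frame size that also covers the
              * Ethernet header, FCS and two VLAN tags
              */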
7738         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7739         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7740             max_frm_size > HCLGE_MAC_MAX_FRAME)
7741                 return -EINVAL;
7742
7743         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7744         mutex_lock(&hdev->vport_lock);
7745         /* VF's mps must fit within hdev->mps */
7746         if (vport->vport_id && max_frm_size > hdev->mps) {
7747                 mutex_unlock(&hdev->vport_lock);
7748                 return -EINVAL;
7749         } else if (vport->vport_id) {
7750                 vport->mps = max_frm_size;
7751                 mutex_unlock(&hdev->vport_lock);
7752                 return 0;
7753         }
7754
7755         /* PF's mps must not be less than any VF's mps */
7756         for (i = 1; i < hdev->num_alloc_vport; i++)
7757                 if (max_frm_size < hdev->vport[i].mps) {
7758                         mutex_unlock(&hdev->vport_lock);
7759                         return -EINVAL;
7760                 }
7761
7762         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7763
7764         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7765         if (ret) {
7766                 dev_err(&hdev->pdev->dev,
7767                         "Change mtu fail, ret =%d\n", ret);
7768                 goto out;
7769         }
7770
7771         hdev->mps = max_frm_size;
7772         vport->mps = max_frm_size;
7773
7774         ret = hclge_buffer_alloc(hdev);
7775         if (ret)
7776                 dev_err(&hdev->pdev->dev,
7777                         "Allocate buffer fail, ret =%d\n", ret);
7778
7779 out:
7780         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7781         mutex_unlock(&hdev->vport_lock);
7782         return ret;
7783 }
7784
7785 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7786                                     bool enable)
7787 {
7788         struct hclge_reset_tqp_queue_cmd *req;
7789         struct hclge_desc desc;
7790         int ret;
7791
7792         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7793
7794         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7795         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7796         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7797
7798         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7799         if (ret) {
7800                 dev_err(&hdev->pdev->dev,
7801                         "Send tqp reset cmd error, status =%d\n", ret);
7802                 return ret;
7803         }
7804
7805         return 0;
7806 }
7807
7808 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7809 {
7810         struct hclge_reset_tqp_queue_cmd *req;
7811         struct hclge_desc desc;
7812         int ret;
7813
7814         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7815
7816         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7817         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7818
7819         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7820         if (ret) {
7821                 dev_err(&hdev->pdev->dev,
7822                         "Get reset status error, status =%d\n", ret);
7823                 return ret;
7824         }
7825
7826         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7827 }
7828
7829 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7830 {
7831         struct hnae3_queue *queue;
7832         struct hclge_tqp *tqp;
7833
7834         queue = handle->kinfo.tqp[queue_id];
7835         tqp = container_of(queue, struct hclge_tqp, q);
7836
7837         return tqp->index;
7838 }
7839
7840 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7841 {
7842         struct hclge_vport *vport = hclge_get_vport(handle);
7843         struct hclge_dev *hdev = vport->back;
7844         int reset_try_times = 0;
7845         int reset_status;
7846         u16 queue_gid;
7847         int ret = 0;
7848
7849         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7850
7851         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7852         if (ret) {
7853                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7854                 return ret;
7855         }
7856
7857         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7858         if (ret) {
7859                 dev_err(&hdev->pdev->dev,
7860                         "Send reset tqp cmd fail, ret = %d\n", ret);
7861                 return ret;
7862         }
7863
7864         reset_try_times = 0;
7865         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7866                 /* Wait for tqp hw reset */
7867                 msleep(20);
7868                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7869                 if (reset_status)
7870                         break;
7871         }
7872
7873         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7874                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7875                 return ret;
7876         }
7877
7878         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7879         if (ret)
7880                 dev_err(&hdev->pdev->dev,
7881                         "Deassert the soft reset fail, ret = %d\n", ret);
7882
7883         return ret;
7884 }
7885
7886 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7887 {
7888         struct hclge_dev *hdev = vport->back;
7889         int reset_try_times = 0;
7890         int reset_status;
7891         u16 queue_gid;
7892         int ret;
7893
7894         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7895
7896         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7897         if (ret) {
7898                 dev_warn(&hdev->pdev->dev,
7899                          "Send reset tqp cmd fail, ret = %d\n", ret);
7900                 return;
7901         }
7902
7903         reset_try_times = 0;
7904         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7905                 /* Wait for tqp hw reset */
7906                 msleep(20);
7907                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7908                 if (reset_status)
7909                         break;
7910         }
7911
7912         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7913                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7914                 return;
7915         }
7916
7917         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7918         if (ret)
7919                 dev_warn(&hdev->pdev->dev,
7920                          "Deassert the soft reset fail, ret = %d\n", ret);
7921 }
7922
7923 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7924 {
7925         struct hclge_vport *vport = hclge_get_vport(handle);
7926         struct hclge_dev *hdev = vport->back;
7927
7928         return hdev->fw_version;
7929 }
7930
7931 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7932 {
7933         struct phy_device *phydev = hdev->hw.mac.phydev;
7934
7935         if (!phydev)
7936                 return;
7937
7938         phy_set_asym_pause(phydev, rx_en, tx_en);
7939 }
7940
7941 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7942 {
7943         int ret;
7944
7945         if (rx_en && tx_en)
7946                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7947         else if (rx_en && !tx_en)
7948                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7949         else if (!rx_en && tx_en)
7950                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7951         else
7952                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7953
7954         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7955                 return 0;
7956
7957         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7958         if (ret) {
7959                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7960                         ret);
7961                 return ret;
7962         }
7963
7964         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7965
7966         return 0;
7967 }
7968
7969 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7970 {
7971         struct phy_device *phydev = hdev->hw.mac.phydev;
7972         u16 remote_advertising = 0;
7973         u16 local_advertising = 0;
7974         u32 rx_pause, tx_pause;
7975         u8 flowctl;
7976
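             /* resolve the pause mode from the local and link partner autoneg
              * advertisements; half duplex links get no pause at all
              */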
7977         if (!phydev->link || !phydev->autoneg)
7978                 return 0;
7979
7980         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7981
7982         if (phydev->pause)
7983                 remote_advertising = LPA_PAUSE_CAP;
7984
7985         if (phydev->asym_pause)
7986                 remote_advertising |= LPA_PAUSE_ASYM;
7987
7988         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7989                                            remote_advertising);
7990         tx_pause = flowctl & FLOW_CTRL_TX;
7991         rx_pause = flowctl & FLOW_CTRL_RX;
7992
7993         if (phydev->duplex == HCLGE_MAC_HALF) {
7994                 tx_pause = 0;
7995                 rx_pause = 0;
7996         }
7997
7998         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7999 }
8000
8001 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8002                                  u32 *rx_en, u32 *tx_en)
8003 {
8004         struct hclge_vport *vport = hclge_get_vport(handle);
8005         struct hclge_dev *hdev = vport->back;
8006
8007         *auto_neg = hclge_get_autoneg(handle);
8008
8009         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8010                 *rx_en = 0;
8011                 *tx_en = 0;
8012                 return;
8013         }
8014
8015         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8016                 *rx_en = 1;
8017                 *tx_en = 0;
8018         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8019                 *tx_en = 1;
8020                 *rx_en = 0;
8021         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8022                 *rx_en = 1;
8023                 *tx_en = 1;
8024         } else {
8025                 *rx_en = 0;
8026                 *tx_en = 0;
8027         }
8028 }
8029
8030 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8031                                 u32 rx_en, u32 tx_en)
8032 {
8033         struct hclge_vport *vport = hclge_get_vport(handle);
8034         struct hclge_dev *hdev = vport->back;
8035         struct phy_device *phydev = hdev->hw.mac.phydev;
8036         u32 fc_autoneg;
8037
8038         fc_autoneg = hclge_get_autoneg(handle);
8039         if (auto_neg != fc_autoneg) {
8040                 dev_info(&hdev->pdev->dev,
8041                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8042                 return -EOPNOTSUPP;
8043         }
8044
8045         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8046                 dev_info(&hdev->pdev->dev,
8047                          "Priority flow control enabled. Cannot set link flow control.\n");
8048                 return -EOPNOTSUPP;
8049         }
8050
8051         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8052
8053         if (!fc_autoneg)
8054                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8055
8056         if (phydev)
8057                 return phy_start_aneg(phydev);
8058
8059         if (hdev->pdev->revision == 0x20)
8060                 return -EOPNOTSUPP;
8061
8062         return hclge_restart_autoneg(handle);
8063 }
8064
8065 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8066                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8067 {
8068         struct hclge_vport *vport = hclge_get_vport(handle);
8069         struct hclge_dev *hdev = vport->back;
8070
8071         if (speed)
8072                 *speed = hdev->hw.mac.speed;
8073         if (duplex)
8074                 *duplex = hdev->hw.mac.duplex;
8075         if (auto_neg)
8076                 *auto_neg = hdev->hw.mac.autoneg;
8077 }
8078
8079 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8080                                  u8 *module_type)
8081 {
8082         struct hclge_vport *vport = hclge_get_vport(handle);
8083         struct hclge_dev *hdev = vport->back;
8084
8085         if (media_type)
8086                 *media_type = hdev->hw.mac.media_type;
8087
8088         if (module_type)
8089                 *module_type = hdev->hw.mac.module_type;
8090 }
8091
8092 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8093                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8094 {
8095         struct hclge_vport *vport = hclge_get_vport(handle);
8096         struct hclge_dev *hdev = vport->back;
8097         struct phy_device *phydev = hdev->hw.mac.phydev;
8098         int mdix_ctrl, mdix, retval, is_resolved;
8099
8100         if (!phydev) {
8101                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8102                 *tp_mdix = ETH_TP_MDI_INVALID;
8103                 return;
8104         }
8105
8106         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8107
8108         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8109         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8110                                     HCLGE_PHY_MDIX_CTRL_S);
8111
8112         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8113         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8114         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8115
8116         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8117
8118         switch (mdix_ctrl) {
8119         case 0x0:
8120                 *tp_mdix_ctrl = ETH_TP_MDI;
8121                 break;
8122         case 0x1:
8123                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8124                 break;
8125         case 0x3:
8126                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8127                 break;
8128         default:
8129                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8130                 break;
8131         }
8132
8133         if (!is_resolved)
8134                 *tp_mdix = ETH_TP_MDI_INVALID;
8135         else if (mdix)
8136                 *tp_mdix = ETH_TP_MDI_X;
8137         else
8138                 *tp_mdix = ETH_TP_MDI;
8139 }
8140
8141 static void hclge_info_show(struct hclge_dev *hdev)
8142 {
8143         struct device *dev = &hdev->pdev->dev;
8144
8145         dev_info(dev, "PF info begin:\n");
8146
8147         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8148         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8149         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8150         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8151         dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8152         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8153         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8154         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8155         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8156         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8157         dev_info(dev, "This is %s PF\n",
8158                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8159         dev_info(dev, "DCB %s\n",
8160                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8161         dev_info(dev, "MQPRIO %s\n",
8162                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8163
8164         dev_info(dev, "PF info end.\n");
8165 }
8166
8167 static int hclge_init_client_instance(struct hnae3_client *client,
8168                                       struct hnae3_ae_dev *ae_dev)
8169 {
8170         struct hclge_dev *hdev = ae_dev->priv;
8171         struct hclge_vport *vport;
8172         int i, ret;
8173
8174         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8175                 vport = &hdev->vport[i];
8176
8177                 switch (client->type) {
8178                 case HNAE3_CLIENT_KNIC:
8179
8180                         hdev->nic_client = client;
8181                         vport->nic.client = client;
8182                         ret = client->ops->init_instance(&vport->nic);
8183                         if (ret)
8184                                 goto clear_nic;
8185
8186                         hnae3_set_client_init_flag(client, ae_dev, 1);
8187
8188                         if (netif_msg_drv(&hdev->vport->nic))
8189                                 hclge_info_show(hdev);
8190
8191                         if (hdev->roce_client &&
8192                             hnae3_dev_roce_supported(hdev)) {
8193                                 struct hnae3_client *rc = hdev->roce_client;
8194
8195                                 ret = hclge_init_roce_base_info(vport);
8196                                 if (ret)
8197                                         goto clear_roce;
8198
8199                                 ret = rc->ops->init_instance(&vport->roce);
8200                                 if (ret)
8201                                         goto clear_roce;
8202
8203                                 hnae3_set_client_init_flag(hdev->roce_client,
8204                                                            ae_dev, 1);
8205                         }
8206
8207                         break;
8208                 case HNAE3_CLIENT_UNIC:
8209                         hdev->nic_client = client;
8210                         vport->nic.client = client;
8211
8212                         ret = client->ops->init_instance(&vport->nic);
8213                         if (ret)
8214                                 goto clear_nic;
8215
8216                         hnae3_set_client_init_flag(client, ae_dev, 1);
8217
8218                         break;
8219                 case HNAE3_CLIENT_ROCE:
8220                         if (hnae3_dev_roce_supported(hdev)) {
8221                                 hdev->roce_client = client;
8222                                 vport->roce.client = client;
8223                         }
8224
8225                         if (hdev->roce_client && hdev->nic_client) {
8226                                 ret = hclge_init_roce_base_info(vport);
8227                                 if (ret)
8228                                         goto clear_roce;
8229
8230                                 ret = client->ops->init_instance(&vport->roce);
8231                                 if (ret)
8232                                         goto clear_roce;
8233
8234                                 hnae3_set_client_init_flag(client, ae_dev, 1);
8235                         }
8236
8237                         break;
8238                 default:
8239                         return -EINVAL;
8240                 }
8241         }
8242
8243         return 0;
8244
8245 clear_nic:
8246         hdev->nic_client = NULL;
8247         vport->nic.client = NULL;
8248         return ret;
8249 clear_roce:
8250         hdev->roce_client = NULL;
8251         vport->roce.client = NULL;
8252         return ret;
8253 }
8254
8255 static void hclge_uninit_client_instance(struct hnae3_client *client,
8256                                          struct hnae3_ae_dev *ae_dev)
8257 {
8258         struct hclge_dev *hdev = ae_dev->priv;
8259         struct hclge_vport *vport;
8260         int i;
8261
8262         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8263                 vport = &hdev->vport[i];
8264                 if (hdev->roce_client) {
8265                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8266                                                                 0);
8267                         hdev->roce_client = NULL;
8268                         vport->roce.client = NULL;
8269                 }
8270                 if (client->type == HNAE3_CLIENT_ROCE)
8271                         return;
8272                 if (hdev->nic_client && client->ops->uninit_instance) {
8273                         client->ops->uninit_instance(&vport->nic, 0);
8274                         hdev->nic_client = NULL;
8275                         vport->nic.client = NULL;
8276                 }
8277         }
8278 }
8279
8280 static int hclge_pci_init(struct hclge_dev *hdev)
8281 {
8282         struct pci_dev *pdev = hdev->pdev;
8283         struct hclge_hw *hw;
8284         int ret;
8285
8286         ret = pci_enable_device(pdev);
8287         if (ret) {
8288                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8289                 return ret;
8290         }
8291
8292         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8293         if (ret) {
8294                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8295                 if (ret) {
8296                         dev_err(&pdev->dev,
8297                                 "can't set consistent PCI DMA");
8298                         goto err_disable_device;
8299                 }
8300                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8301         }
8302
8303         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8304         if (ret) {
8305                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8306                 goto err_disable_device;
8307         }
8308
8309         pci_set_master(pdev);
8310         hw = &hdev->hw;
8311         hw->io_base = pcim_iomap(pdev, 2, 0);
8312         if (!hw->io_base) {
8313                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8314                 ret = -ENOMEM;
8315                 goto err_clr_master;
8316         }
8317
8318         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8319
8320         return 0;
8321 err_clr_master:
8322         pci_clear_master(pdev);
8323         pci_release_regions(pdev);
8324 err_disable_device:
8325         pci_disable_device(pdev);
8326
8327         return ret;
8328 }
8329
8330 static void hclge_pci_uninit(struct hclge_dev *hdev)
8331 {
8332         struct pci_dev *pdev = hdev->pdev;
8333
8334         pcim_iounmap(pdev, hdev->hw.io_base);
8335         pci_free_irq_vectors(pdev);
8336         pci_clear_master(pdev);
8337         pci_release_mem_regions(pdev);
8338         pci_disable_device(pdev);
8339 }
8340
8341 static void hclge_state_init(struct hclge_dev *hdev)
8342 {
8343         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8344         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8345         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8346         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8347         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8348         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8349 }
8350
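     /* Mark the device down, then stop the timers and flush the service,
      * reset and mailbox work items.
      */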
8351 static void hclge_state_uninit(struct hclge_dev *hdev)
8352 {
8353         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8354
8355         if (hdev->service_timer.function)
8356                 del_timer_sync(&hdev->service_timer);
8357         if (hdev->reset_timer.function)
8358                 del_timer_sync(&hdev->reset_timer);
8359         if (hdev->service_task.func)
8360                 cancel_work_sync(&hdev->service_task);
8361         if (hdev->rst_service_task.func)
8362                 cancel_work_sync(&hdev->rst_service_task);
8363         if (hdev->mbx_service_task.func)
8364                 cancel_work_sync(&hdev->mbx_service_task);
8365 }
8366
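     /* Request an FLR reset and poll (up to 50 * 100 ms) until the reset
      * path signals that the function has been brought down.
      */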
8367 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8368 {
8369 #define HCLGE_FLR_WAIT_MS       100
8370 #define HCLGE_FLR_WAIT_CNT      50
8371         struct hclge_dev *hdev = ae_dev->priv;
8372         int cnt = 0;
8373
8374         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8375         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8376         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8377         hclge_reset_event(hdev->pdev, NULL);
8378
8379         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8380                cnt++ < HCLGE_FLR_WAIT_CNT)
8381                 msleep(HCLGE_FLR_WAIT_MS);
8382
8383         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8384                 dev_err(&hdev->pdev->dev,
8385                         "flr wait down timeout: %d\n", cnt);
8386 }
8387
8388 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8389 {
8390         struct hclge_dev *hdev = ae_dev->priv;
8391
8392         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8393 }
8394
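     /* Main PF initialization: bring up PCI, the command queue, MSI/MSI-X and
      * the misc vector, allocate TQPs and vports, then configure MAC, VLAN,
      * TM, RSS and the flow director before arming the service timers.
      */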
8395 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8396 {
8397         struct pci_dev *pdev = ae_dev->pdev;
8398         struct hclge_dev *hdev;
8399         int ret;
8400
8401         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8402         if (!hdev) {
8403                 ret = -ENOMEM;
8404                 goto out;
8405         }
8406
8407         hdev->pdev = pdev;
8408         hdev->ae_dev = ae_dev;
8409         hdev->reset_type = HNAE3_NONE_RESET;
8410         hdev->reset_level = HNAE3_FUNC_RESET;
8411         ae_dev->priv = hdev;
8412         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8413
8414         mutex_init(&hdev->vport_lock);
8415         mutex_init(&hdev->vport_cfg_mutex);
8416         spin_lock_init(&hdev->fd_rule_lock);
8417
8418         ret = hclge_pci_init(hdev);
8419         if (ret) {
8420                 dev_err(&pdev->dev, "PCI init failed\n");
8421                 goto out;
8422         }
8423
8424         /* Firmware command queue initialization */
8425         ret = hclge_cmd_queue_init(hdev);
8426         if (ret) {
8427                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8428                 goto err_pci_uninit;
8429         }
8430
8431         /* Firmware command initialization */
8432         ret = hclge_cmd_init(hdev);
8433         if (ret)
8434                 goto err_cmd_uninit;
8435
8436         ret = hclge_get_cap(hdev);
8437         if (ret) {
8438                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8439                         ret);
8440                 goto err_cmd_uninit;
8441         }
8442
8443         ret = hclge_configure(hdev);
8444         if (ret) {
8445                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8446                 goto err_cmd_uninit;
8447         }
8448
8449         ret = hclge_init_msi(hdev);
8450         if (ret) {
8451                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8452                 goto err_cmd_uninit;
8453         }
8454
8455         ret = hclge_misc_irq_init(hdev);
8456         if (ret) {
8457                 dev_err(&pdev->dev,
8458                         "Misc IRQ(vector0) init error, ret = %d.\n",
8459                         ret);
8460                 goto err_msi_uninit;
8461         }
8462
8463         ret = hclge_alloc_tqps(hdev);
8464         if (ret) {
8465                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8466                 goto err_msi_irq_uninit;
8467         }
8468
8469         ret = hclge_alloc_vport(hdev);
8470         if (ret) {
8471                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8472                 goto err_msi_irq_uninit;
8473         }
8474
8475         ret = hclge_map_tqp(hdev);
8476         if (ret) {
8477                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8478                 goto err_msi_irq_uninit;
8479         }
8480
8481         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8482                 ret = hclge_mac_mdio_config(hdev);
8483                 if (ret) {
8484                         dev_err(&hdev->pdev->dev,
8485                                 "mdio config fail ret=%d\n", ret);
8486                         goto err_msi_irq_uninit;
8487                 }
8488         }
8489
8490         ret = hclge_init_umv_space(hdev);
8491         if (ret) {
8492                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8493                 goto err_mdiobus_unreg;
8494         }
8495
8496         ret = hclge_mac_init(hdev);
8497         if (ret) {
8498                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8499                 goto err_mdiobus_unreg;
8500         }
8501
8502         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8503         if (ret) {
8504                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8505                 goto err_mdiobus_unreg;
8506         }
8507
8508         ret = hclge_config_gro(hdev, true);
8509         if (ret)
8510                 goto err_mdiobus_unreg;
8511
8512         ret = hclge_init_vlan_config(hdev);
8513         if (ret) {
8514                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8515                 goto err_mdiobus_unreg;
8516         }
8517
8518         ret = hclge_tm_schd_init(hdev);
8519         if (ret) {
8520                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8521                 goto err_mdiobus_unreg;
8522         }
8523
8524         hclge_rss_init_cfg(hdev);
8525         ret = hclge_rss_init_hw(hdev);
8526         if (ret) {
8527                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8528                 goto err_mdiobus_unreg;
8529         }
8530
8531         ret = init_mgr_tbl(hdev);
8532         if (ret) {
8533                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8534                 goto err_mdiobus_unreg;
8535         }
8536
8537         ret = hclge_init_fd_config(hdev);
8538         if (ret) {
8539                 dev_err(&pdev->dev,
8540                         "fd table init fail, ret=%d\n", ret);
8541                 goto err_mdiobus_unreg;
8542         }
8543
8544         ret = hclge_hw_error_set_state(hdev, true);
8545         if (ret) {
8546                 dev_err(&pdev->dev,
8547                         "fail(%d) to enable hw error interrupts\n", ret);
8548                 goto err_mdiobus_unreg;
8549         }
8550
8551         INIT_KFIFO(hdev->mac_tnl_log);
8552
8553         hclge_dcb_ops_set(hdev);
8554
8555         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8556         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8557         INIT_WORK(&hdev->service_task, hclge_service_task);
8558         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8559         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8560
8561         hclge_clear_all_event_cause(hdev);
8562
8563         /* Enable MISC vector(vector0) */
8564         hclge_enable_vector(&hdev->misc_vector, true);
8565
8566         hclge_state_init(hdev);
8567         hdev->last_reset_time = jiffies;
8568
8569         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8570         return 0;
8571
8572 err_mdiobus_unreg:
8573         if (hdev->hw.mac.phydev)
8574                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8575 err_msi_irq_uninit:
8576         hclge_misc_irq_uninit(hdev);
8577 err_msi_uninit:
8578         pci_free_irq_vectors(pdev);
8579 err_cmd_uninit:
8580         hclge_cmd_uninit(hdev);
8581 err_pci_uninit:
8582         pcim_iounmap(pdev, hdev->hw.io_base);
8583         pci_clear_master(pdev);
8584         pci_release_regions(pdev);
8585         pci_disable_device(pdev);
8586 out:
8587         return ret;
8588 }
8589
8590 static void hclge_stats_clear(struct hclge_dev *hdev)
8591 {
8592         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8593 }
8594
8595 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8596 {
8597         struct hclge_vport *vport = hdev->vport;
8598         int i;
8599
8600         for (i = 0; i < hdev->num_alloc_vport; i++) {
8601                 hclge_vport_stop(vport);
8602                 vport++;
8603         }
8604 }
8605
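     /* Rebuild the hardware configuration after a reset: the command queue,
      * TQP mapping, MAC, VLAN, TM, RSS and flow director are reinitialized
      * while the software resources allocated at probe time are reused.
      */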
8606 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8607 {
8608         struct hclge_dev *hdev = ae_dev->priv;
8609         struct pci_dev *pdev = ae_dev->pdev;
8610         int ret;
8611
8612         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8613
8614         hclge_stats_clear(hdev);
8615         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8616
8617         ret = hclge_cmd_init(hdev);
8618         if (ret) {
8619                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8620                 return ret;
8621         }
8622
8623         ret = hclge_map_tqp(hdev);
8624         if (ret) {
8625                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8626                 return ret;
8627         }
8628
8629         hclge_reset_umv_space(hdev);
8630
8631         ret = hclge_mac_init(hdev);
8632         if (ret) {
8633                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8634                 return ret;
8635         }
8636
8637         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8638         if (ret) {
8639                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8640                 return ret;
8641         }
8642
8643         ret = hclge_config_gro(hdev, true);
8644         if (ret)
8645                 return ret;
8646
8647         ret = hclge_init_vlan_config(hdev);
8648         if (ret) {
8649                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8650                 return ret;
8651         }
8652
8653         ret = hclge_tm_init_hw(hdev, true);
8654         if (ret) {
8655                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8656                 return ret;
8657         }
8658
8659         ret = hclge_rss_init_hw(hdev);
8660         if (ret) {
8661                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8662                 return ret;
8663         }
8664
8665         ret = hclge_init_fd_config(hdev);
8666         if (ret) {
8667                 dev_err(&pdev->dev,
8668                         "fd table init fail, ret=%d\n", ret);
8669                 return ret;
8670         }
8671
8672         /* Re-enable the hw error interrupts because
8673          * the interrupts get disabled on core/global reset.
8674          */
8675         ret = hclge_hw_error_set_state(hdev, true);
8676         if (ret) {
8677                 dev_err(&pdev->dev,
8678                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8679                 return ret;
8680         }
8681
8682         hclge_reset_vport_state(hdev);
8683
8684         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8685                  HCLGE_DRIVER_NAME);
8686
8687         return 0;
8688 }
8689
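     /* Tear down the PF in roughly the reverse order of hclge_init_ae_dev():
      * stop the service tasks, quiesce the misc vector, disable hardware
      * error reporting and release the command queue and PCI resources.
      */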
8690 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8691 {
8692         struct hclge_dev *hdev = ae_dev->priv;
8693         struct hclge_mac *mac = &hdev->hw.mac;
8694
8695         hclge_state_uninit(hdev);
8696
8697         if (mac->phydev)
8698                 mdiobus_unregister(mac->mdio_bus);
8699
8700         hclge_uninit_umv_space(hdev);
8701
8702         /* Disable MISC vector(vector0) */
8703         hclge_enable_vector(&hdev->misc_vector, false);
8704         synchronize_irq(hdev->misc_vector.vector_irq);
8705
8706         hclge_config_mac_tnl_int(hdev, false);
8707         hclge_hw_error_set_state(hdev, false);
8708         hclge_cmd_uninit(hdev);
8709         hclge_misc_irq_uninit(hdev);
8710         hclge_pci_uninit(hdev);
8711         mutex_destroy(&hdev->vport_lock);
8712         hclge_uninit_vport_mac_table(hdev);
8713         hclge_uninit_vport_vlan_table(hdev);
8714         mutex_destroy(&hdev->vport_cfg_mutex);
8715         ae_dev->priv = NULL;
8716 }
8717
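     /* The maximum number of combined channels is limited by both the RSS
      * size supported by hardware and the TQPs available per TC.
      */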
8718 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8719 {
8720         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8721         struct hclge_vport *vport = hclge_get_vport(handle);
8722         struct hclge_dev *hdev = vport->back;
8723
8724         return min_t(u32, hdev->rss_size_max,
8725                      vport->alloc_tqps / kinfo->num_tc);
8726 }
8727
8728 static void hclge_get_channels(struct hnae3_handle *handle,
8729                                struct ethtool_channels *ch)
8730 {
8731         ch->max_combined = hclge_get_max_channels(handle);
8732         ch->other_count = 1;
8733         ch->max_other = 1;
8734         ch->combined_count = handle->kinfo.rss_size;
8735 }
8736
8737 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8738                                         u16 *alloc_tqps, u16 *max_rss_size)
8739 {
8740         struct hclge_vport *vport = hclge_get_vport(handle);
8741         struct hclge_dev *hdev = vport->back;
8742
8743         *alloc_tqps = vport->alloc_tqps;
8744         *max_rss_size = hdev->rss_size_max;
8745 }
8746
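     /* Apply a new channel count: update the requested queue number, remap
      * the vport's TQPs, rewrite the RSS TC mode and, unless the user has
      * configured it explicitly, rebuild the RSS indirection table.
      */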
8747 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8748                               bool rxfh_configured)
8749 {
8750         struct hclge_vport *vport = hclge_get_vport(handle);
8751         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8752         struct hclge_dev *hdev = vport->back;
8753         int cur_rss_size = kinfo->rss_size;
8754         int cur_tqps = kinfo->num_tqps;
8755         u16 tc_offset[HCLGE_MAX_TC_NUM];
8756         u16 tc_valid[HCLGE_MAX_TC_NUM];
8757         u16 tc_size[HCLGE_MAX_TC_NUM];
8758         u16 roundup_size;
8759         u32 *rss_indir;
8760         int ret, i;
8761
8762         kinfo->req_rss_size = new_tqps_num;
8763
8764         ret = hclge_tm_vport_map_update(hdev);
8765         if (ret) {
8766                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8767                 return ret;
8768         }
8769
8770         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8771         roundup_size = ilog2(roundup_size);
8772         /* Set the RSS TC mode according to the new RSS size */
8773         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8774                 tc_valid[i] = 0;
8775
8776                 if (!(hdev->hw_tc_map & BIT(i)))
8777                         continue;
8778
8779                 tc_valid[i] = 1;
8780                 tc_size[i] = roundup_size;
8781                 tc_offset[i] = kinfo->rss_size * i;
8782         }
8783         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8784         if (ret)
8785                 return ret;
8786
8787         /* RSS indirection table has been configured by user */
8788         if (rxfh_configured)
8789                 goto out;
8790
8791         /* Reinitialize the RSS indirection table according to the new RSS size */
8792         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8793         if (!rss_indir)
8794                 return -ENOMEM;
8795
8796         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8797                 rss_indir[i] = i % kinfo->rss_size;
8798
8799         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8800         if (ret)
8801                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8802                         ret);
8803
8804         kfree(rss_indir);
8805
8806 out:
8807         if (!ret)
8808                 dev_info(&hdev->pdev->dev,
8809                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8810                          cur_rss_size, kinfo->rss_size,
8811                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8812
8813         return ret;
8814 }
8815
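     /* Query firmware for the number of 32-bit and 64-bit registers in the
      * register dump.
      */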
8816 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8817                               u32 *regs_num_64_bit)
8818 {
8819         struct hclge_desc desc;
8820         u32 total_num;
8821         int ret;
8822
8823         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8824         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8825         if (ret) {
8826                 dev_err(&hdev->pdev->dev,
8827                         "Query register number cmd failed, ret = %d.\n", ret);
8828                 return ret;
8829         }
8830
8831         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8832         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8833
8834         total_num = *regs_num_32_bit + *regs_num_64_bit;
8835         if (!total_num)
8836                 return -EINVAL;
8837
8838         return 0;
8839 }
8840
8841 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8842                                  void *data)
8843 {
8844 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8845
8846         struct hclge_desc *desc;
8847         u32 *reg_val = data;
8848         __le32 *desc_data;
8849         int cmd_num;
8850         int i, k, n;
8851         int ret;
8852
8853         if (regs_num == 0)
8854                 return 0;
8855
8856         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8857         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8858         if (!desc)
8859                 return -ENOMEM;
8860
8861         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8862         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8863         if (ret) {
8864                 dev_err(&hdev->pdev->dev,
8865                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8866                 kfree(desc);
8867                 return ret;
8868         }
8869
8870         for (i = 0; i < cmd_num; i++) {
8871                 if (i == 0) {
8872                         desc_data = (__le32 *)(&desc[i].data[0]);
8873                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8874                 } else {
8875                         desc_data = (__le32 *)(&desc[i]);
8876                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8877                 }
8878                 for (k = 0; k < n; k++) {
8879                         *reg_val++ = le32_to_cpu(*desc_data++);
8880
8881                         regs_num--;
8882                         if (!regs_num)
8883                                 break;
8884                 }
8885         }
8886
8887         kfree(desc);
8888         return 0;
8889 }
8890
8891 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8892                                  void *data)
8893 {
8894 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8895
8896         struct hclge_desc *desc;
8897         u64 *reg_val = data;
8898         __le64 *desc_data;
8899         int cmd_num;
8900         int i, k, n;
8901         int ret;
8902
8903         if (regs_num == 0)
8904                 return 0;
8905
8906         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8907         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8908         if (!desc)
8909                 return -ENOMEM;
8910
8911         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8912         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8913         if (ret) {
8914                 dev_err(&hdev->pdev->dev,
8915                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8916                 kfree(desc);
8917                 return ret;
8918         }
8919
8920         for (i = 0; i < cmd_num; i++) {
8921                 if (i == 0) {
8922                         desc_data = (__le64 *)(&desc[i].data[0]);
8923                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8924                 } else {
8925                         desc_data = (__le64 *)(&desc[i]);
8926                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8927                 }
8928                 for (k = 0; k < n; k++) {
8929                         *reg_val++ = le64_to_cpu(*desc_data++);
8930
8931                         regs_num--;
8932                         if (!regs_num)
8933                                 break;
8934                 }
8935         }
8936
8937         kfree(desc);
8938         return 0;
8939 }
8940
8941 #define MAX_SEPARATE_NUM        4
8942 #define SEPARATOR_VALUE         0xFFFFFFFF
8943 #define REG_NUM_PER_LINE        4
8944 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8945
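     /* ethtool register dump layout: the MMIO register groups are written in
      * lines of REG_NUM_PER_LINE words padded with SEPARATOR_VALUE, followed
      * by the 32-bit and 64-bit register sets read from firmware.
      */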
8946 static int hclge_get_regs_len(struct hnae3_handle *handle)
8947 {
8948         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8949         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8950         struct hclge_vport *vport = hclge_get_vport(handle);
8951         struct hclge_dev *hdev = vport->back;
8952         u32 regs_num_32_bit, regs_num_64_bit;
8953         int ret;
8954
8955         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8956         if (ret) {
8957                 dev_err(&hdev->pdev->dev,
8958                         "Get register number failed, ret = %d.\n", ret);
8959                 return -EOPNOTSUPP;
8960         }
8961
8962         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8963         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8964         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8965         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8966
8967         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8968                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8969                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8970 }
8971
8972 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8973                            void *data)
8974 {
8975         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8976         struct hclge_vport *vport = hclge_get_vport(handle);
8977         struct hclge_dev *hdev = vport->back;
8978         u32 regs_num_32_bit, regs_num_64_bit;
8979         int i, j, reg_um, separator_num;
8980         u32 *reg = data;
8981         int ret;
8982
8983         *version = hdev->fw_version;
8984
8985         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8986         if (ret) {
8987                 dev_err(&hdev->pdev->dev,
8988                         "Get register number failed, ret = %d.\n", ret);
8989                 return;
8990         }
8991
8992         /* fetching per-PF register values from the PF PCIe register space */
8993         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8994         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8995         for (i = 0; i < reg_um; i++)
8996                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8997         for (i = 0; i < separator_num; i++)
8998                 *reg++ = SEPARATOR_VALUE;
8999
9000         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9001         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9002         for (i = 0; i < reg_um; i++)
9003                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9004         for (i = 0; i < separator_num; i++)
9005                 *reg++ = SEPARATOR_VALUE;
9006
9007         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9008         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9009         for (j = 0; j < kinfo->num_tqps; j++) {
9010                 for (i = 0; i < reg_um; i++)
9011                         *reg++ = hclge_read_dev(&hdev->hw,
9012                                                 ring_reg_addr_list[i] +
9013                                                 0x200 * j);
9014                 for (i = 0; i < separator_num; i++)
9015                         *reg++ = SEPARATOR_VALUE;
9016         }
9017
9018         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9019         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9020         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9021                 for (i = 0; i < reg_um; i++)
9022                         *reg++ = hclge_read_dev(&hdev->hw,
9023                                                 tqp_intr_reg_addr_list[i] +
9024                                                 4 * j);
9025                 for (i = 0; i < separator_num; i++)
9026                         *reg++ = SEPARATOR_VALUE;
9027         }
9028
9029         /* fetching PF common register values from firmware */
9030         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9031         if (ret) {
9032                 dev_err(&hdev->pdev->dev,
9033                         "Get 32 bit register failed, ret = %d.\n", ret);
9034                 return;
9035         }
9036
9037         reg += regs_num_32_bit;
9038         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9039         if (ret)
9040                 dev_err(&hdev->pdev->dev,
9041                         "Get 64 bit register failed, ret = %d.\n", ret);
9042 }
9043
9044 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9045 {
9046         struct hclge_set_led_state_cmd *req;
9047         struct hclge_desc desc;
9048         int ret;
9049
9050         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9051
9052         req = (struct hclge_set_led_state_cmd *)desc.data;
9053         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9054                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9055
9056         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9057         if (ret)
9058                 dev_err(&hdev->pdev->dev,
9059                         "Send set led state cmd error, ret =%d\n", ret);
9060
9061         return ret;
9062 }
9063
9064 enum hclge_led_status {
9065         HCLGE_LED_OFF,
9066         HCLGE_LED_ON,
9067         HCLGE_LED_NO_CHANGE = 0xFF,
9068 };
9069
9070 static int hclge_set_led_id(struct hnae3_handle *handle,
9071                             enum ethtool_phys_id_state status)
9072 {
9073         struct hclge_vport *vport = hclge_get_vport(handle);
9074         struct hclge_dev *hdev = vport->back;
9075
9076         switch (status) {
9077         case ETHTOOL_ID_ACTIVE:
9078                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9079         case ETHTOOL_ID_INACTIVE:
9080                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9081         default:
9082                 return -EINVAL;
9083         }
9084 }
9085
9086 static void hclge_get_link_mode(struct hnae3_handle *handle,
9087                                 unsigned long *supported,
9088                                 unsigned long *advertising)
9089 {
9090         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9091         struct hclge_vport *vport = hclge_get_vport(handle);
9092         struct hclge_dev *hdev = vport->back;
9093         unsigned int idx = 0;
9094
9095         for (; idx < size; idx++) {
9096                 supported[idx] = hdev->hw.mac.supported[idx];
9097                 advertising[idx] = hdev->hw.mac.advertising[idx];
9098         }
9099 }
9100
9101 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9102 {
9103         struct hclge_vport *vport = hclge_get_vport(handle);
9104         struct hclge_dev *hdev = vport->back;
9105
9106         return hclge_config_gro(hdev, enable);
9107 }
9108
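     /* Callbacks registered with the hnae3 framework for PF devices. */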
9109 static const struct hnae3_ae_ops hclge_ops = {
9110         .init_ae_dev = hclge_init_ae_dev,
9111         .uninit_ae_dev = hclge_uninit_ae_dev,
9112         .flr_prepare = hclge_flr_prepare,
9113         .flr_done = hclge_flr_done,
9114         .init_client_instance = hclge_init_client_instance,
9115         .uninit_client_instance = hclge_uninit_client_instance,
9116         .map_ring_to_vector = hclge_map_ring_to_vector,
9117         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9118         .get_vector = hclge_get_vector,
9119         .put_vector = hclge_put_vector,
9120         .set_promisc_mode = hclge_set_promisc_mode,
9121         .set_loopback = hclge_set_loopback,
9122         .start = hclge_ae_start,
9123         .stop = hclge_ae_stop,
9124         .client_start = hclge_client_start,
9125         .client_stop = hclge_client_stop,
9126         .get_status = hclge_get_status,
9127         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9128         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9129         .get_media_type = hclge_get_media_type,
9130         .check_port_speed = hclge_check_port_speed,
9131         .get_fec = hclge_get_fec,
9132         .set_fec = hclge_set_fec,
9133         .get_rss_key_size = hclge_get_rss_key_size,
9134         .get_rss_indir_size = hclge_get_rss_indir_size,
9135         .get_rss = hclge_get_rss,
9136         .set_rss = hclge_set_rss,
9137         .set_rss_tuple = hclge_set_rss_tuple,
9138         .get_rss_tuple = hclge_get_rss_tuple,
9139         .get_tc_size = hclge_get_tc_size,
9140         .get_mac_addr = hclge_get_mac_addr,
9141         .set_mac_addr = hclge_set_mac_addr,
9142         .do_ioctl = hclge_do_ioctl,
9143         .add_uc_addr = hclge_add_uc_addr,
9144         .rm_uc_addr = hclge_rm_uc_addr,
9145         .add_mc_addr = hclge_add_mc_addr,
9146         .rm_mc_addr = hclge_rm_mc_addr,
9147         .set_autoneg = hclge_set_autoneg,
9148         .get_autoneg = hclge_get_autoneg,
9149         .restart_autoneg = hclge_restart_autoneg,
9150         .get_pauseparam = hclge_get_pauseparam,
9151         .set_pauseparam = hclge_set_pauseparam,
9152         .set_mtu = hclge_set_mtu,
9153         .reset_queue = hclge_reset_tqp,
9154         .get_stats = hclge_get_stats,
9155         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9156         .update_stats = hclge_update_stats,
9157         .get_strings = hclge_get_strings,
9158         .get_sset_count = hclge_get_sset_count,
9159         .get_fw_version = hclge_get_fw_version,
9160         .get_mdix_mode = hclge_get_mdix_mode,
9161         .enable_vlan_filter = hclge_enable_vlan_filter,
9162         .set_vlan_filter = hclge_set_vlan_filter,
9163         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9164         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9165         .reset_event = hclge_reset_event,
9166         .set_default_reset_request = hclge_set_def_reset_request,
9167         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9168         .set_channels = hclge_set_channels,
9169         .get_channels = hclge_get_channels,
9170         .get_regs_len = hclge_get_regs_len,
9171         .get_regs = hclge_get_regs,
9172         .set_led_id = hclge_set_led_id,
9173         .get_link_mode = hclge_get_link_mode,
9174         .add_fd_entry = hclge_add_fd_entry,
9175         .del_fd_entry = hclge_del_fd_entry,
9176         .del_all_fd_entries = hclge_del_all_fd_entries,
9177         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9178         .get_fd_rule_info = hclge_get_fd_rule_info,
9179         .get_fd_all_rules = hclge_get_all_rules,
9180         .restore_fd_rules = hclge_restore_fd_entries,
9181         .enable_fd = hclge_enable_fd,
9182         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9183         .dbg_run_cmd = hclge_dbg_run_cmd,
9184         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9185         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9186         .ae_dev_resetting = hclge_ae_dev_resetting,
9187         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9188         .set_gro_en = hclge_gro_en,
9189         .get_global_queue_id = hclge_covert_handle_qid_global,
9190         .set_timer_task = hclge_set_timer_task,
9191         .mac_connect_phy = hclge_mac_connect_phy,
9192         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9193 };
9194
9195 static struct hnae3_ae_algo ae_algo = {
9196         .ops = &hclge_ops,
9197         .pdev_id_table = ae_algo_pci_tbl,
9198 };
9199
9200 static int hclge_init(void)
9201 {
9202         pr_info("%s is initializing\n", HCLGE_NAME);
9203
9204         hnae3_register_ae_algo(&ae_algo);
9205
9206         return 0;
9207 }
9208
9209 static void hclge_exit(void)
9210 {
9211         hnae3_unregister_ae_algo(&ae_algo);
9212 }
9213 module_init(hclge_init);
9214 module_exit(hclge_exit);
9215
9216 MODULE_LICENSE("GPL");
9217 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9218 MODULE_DESCRIPTION("HCLGE Driver");
9219 MODULE_VERSION(HCLGE_MOD_VERSION);