linux.git blob 87c5cb0f4c8ca2240c5036bf3c19ca96cef2dc59: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
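/* Fallback statistics read for firmware that cannot report the MAC register
 * count: issue the HCLGE_OPC_STATS_MAC command with a fixed number of
 * descriptors and accumulate the returned 64-bit counters into hw_stats.
 */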
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0032, only the first desc has the head */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0034, only the first desc has the head */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
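        /* Convert the register count into the number of descriptors to
         * request: one head descriptor plus DIV_ROUND_UP(reg_num - 3, 4)
         * data descriptors, which is what the expression below computes.
         */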
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
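/* Each TQP contributes one TX and one RX packet counter to the ethtool
 * per-queue statistics, hence num_tqps * 2 entries.
 */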
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
505         return kinfo->num_tqps * 2;
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
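/* Copy each counter described by the string/offset table out of the
 * statistics structure into the ethtool data buffer and return the
 * advanced buffer pointer.
 */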
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
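/* The HCLGE_STATE_STATISTICS_UPDATING bit acts as a simple lock: if an
 * update is already in flight, skip this request instead of issuing
 * overlapping firmware queries.
 */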
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: only supported in GE mode
624          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
625          * phy: only supported when a phy device exists on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear loopback bit flags at first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Set the pf to main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* PF should have NIC vectors and RoCE vectors;
803                  * NIC vectors are queued before RoCE vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi  +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to supporting all speeds for a GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }
1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
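        /* param[2] holds the low 32 bits of the MAC address and param[3]
         * holds the high 16 bits; shift the high part up by 32 bits
         * (done as 31 + 1 below) and merge before splitting into bytes.
         */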
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be filled
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* Len is in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* minimal queue pairs equal the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Non-contiguous TCs are not currently supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
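/* hclge_alloc_tqps: allocate the PF's TQP table and initialize each queue's
 * descriptor numbers, buffer size and register base.
 * @hdev: pointer to struct hclge_dev
 * @return: 0 on success, -ENOMEM on allocation failure
 */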
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
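/* hclge_map_tqps_to_func: bind one physical TQP to a function (PF or VF)
 * and to a virtual queue id by sending the SET_TQP_MAP command.
 * @hdev: pointer to struct hclge_dev
 * @func_id: target function (vport) id
 * @tqp_pid: physical TQP index
 * @tqp_vid: virtual queue id seen by the function
 * @is_pf: true when mapping for the PF itself
 * @return: 0 on success, negative errno on failure
 */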
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
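/* hclge_assign_tqp: take up to @num_tqps unallocated TQPs from the PF pool,
 * attach them to the vport's nic handle and derive the vport's rss_size.
 * @vport: pointer to the vport being set up
 * @num_tqps: number of TQPs requested
 * @return: always 0
 */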
1370 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
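/* hclge_knic_setup: initialize the KNIC private info of a vport (descriptor
 * numbers, rx buffer length) and assign TQPs to it.
 * @vport: pointer to the vport being set up
 * @num_tqps: number of TQPs to assign
 * @num_tx_desc: TX descriptor number per queue
 * @num_rx_desc: RX descriptor number per queue
 * @return: 0 on success, negative errno on failure
 */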
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         /* this would be initialized later */
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1480                 ret = hclge_knic_setup(vport, num_tqps,
1481                                        hdev->num_tx_desc, hdev->num_rx_desc);
1482
1483                 if (ret) {
1484                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485                                 ret);
1486                         return ret;
1487                 }
1488         } else {
1489                 hclge_unic_setup(vport, num_tqps);
1490         }
1491
1492         return 0;
1493 }
1494
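/* hclge_alloc_vport: allocate one vport for the PF's main NIC plus one per
 * requested VF/VMDq instance, spread the TQPs evenly among them (the main
 * vport also gets the remainder) and set up each vport.
 * @hdev: pointer to struct hclge_dev
 * @return: 0 on success, negative errno on failure
 */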
1495 static int hclge_alloc_vport(struct hclge_dev *hdev)
1496 {
1497         struct pci_dev *pdev = hdev->pdev;
1498         struct hclge_vport *vport;
1499         u32 tqp_main_vport;
1500         u32 tqp_per_vport;
1501         int num_vport, i;
1502         int ret;
1503
1504         /* We need to alloc a vport for the main NIC of the PF */
1505         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1506
1507         if (hdev->num_tqps < num_vport) {
1508                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1509                         hdev->num_tqps, num_vport);
1510                 return -EINVAL;
1511         }
1512
1513         /* Alloc the same number of TQPs for every vport */
1514         tqp_per_vport = hdev->num_tqps / num_vport;
1515         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1516
1517         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518                              GFP_KERNEL);
1519         if (!vport)
1520                 return -ENOMEM;
1521
1522         hdev->vport = vport;
1523         hdev->num_alloc_vport = num_vport;
1524
1525         if (IS_ENABLED(CONFIG_PCI_IOV))
1526                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1527
1528         for (i = 0; i < num_vport; i++) {
1529                 vport->back = hdev;
1530                 vport->vport_id = i;
1531                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1532                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1533                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1534                 INIT_LIST_HEAD(&vport->vlan_list);
1535                 INIT_LIST_HEAD(&vport->uc_mac_list);
1536                 INIT_LIST_HEAD(&vport->mc_mac_list);
1537
1538                 if (i == 0)
1539                         ret = hclge_vport_setup(vport, tqp_main_vport);
1540                 else
1541                         ret = hclge_vport_setup(vport, tqp_per_vport);
1542                 if (ret) {
1543                         dev_err(&pdev->dev,
1544                                 "vport setup failed for vport %d, %d\n",
1545                                 i, ret);
1546                         return ret;
1547                 }
1548
1549                 vport++;
1550         }
1551
1552         return 0;
1553 }
1554
1555 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1556                                     struct hclge_pkt_buf_alloc *buf_alloc)
1557 {
1558 /* TX buffer size is in units of 128 bytes */
1559 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1560 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1561         struct hclge_tx_buff_alloc_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564         u8 i;
1565
1566         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1569         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1571
1572                 req->tx_pkt_buff[i] =
1573                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1574                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1575         }
1576
1577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1578         if (ret)
1579                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1580                         ret);
1581
1582         return ret;
1583 }
1584
1585 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1586                                  struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1589
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592
1593         return ret;
1594 }
1595
1596 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1597 {
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1601                 if (hdev->hw_tc_map & BIT(i))
1602                         cnt++;
1603         return cnt;
1604 }
1605
1606 /* Get the number of PFC-enabled TCs that have a private buffer */
1607 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1608                                   struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of PFC-disabled TCs that have a private buffer */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         int i, cnt = 0;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (hdev->hw_tc_map & BIT(i) &&
1633                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634                     priv->enable)
1635                         cnt++;
1636         }
1637
1638         return cnt;
1639 }
1640
1641 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_priv_buf *priv;
1644         u32 rx_priv = 0;
1645         int i;
1646
1647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1648                 priv = &buf_alloc->priv_buf[i];
1649                 if (priv->enable)
1650                         rx_priv += priv->buf_size;
1651         }
1652         return rx_priv;
1653 }
1654
1655 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657         u32 i, total_tx_size = 0;
1658
1659         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1660                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1661
1662         return total_tx_size;
1663 }
1664
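/* hclge_is_rx_buf_ok: check whether @rx_all bytes of packet buffer can hold
 * the RX private buffers plus the required shared buffer. When it fits, the
 * shared buffer size and per-TC thresholds are filled into @buf_alloc.
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @rx_all: total buffer size available for RX
 * @return: true if the buffer fits, false otherwise
 */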
1665 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1666                                 struct hclge_pkt_buf_alloc *buf_alloc,
1667                                 u32 rx_all)
1668 {
1669         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1670         u32 tc_num = hclge_get_tc_num(hdev);
1671         u32 shared_buf, aligned_mps;
1672         u32 rx_priv;
1673         int i;
1674
1675         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1676
1677         if (hnae3_dev_dcb_supported(hdev))
1678                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1679         else
1680                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1681                                         + hdev->dv_buf_size;
1682
1683         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1684         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1685                              HCLGE_BUF_SIZE_UNIT);
1686
1687         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1688         if (rx_all < rx_priv + shared_std)
1689                 return false;
1690
1691         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1692         buf_alloc->s_buf.buf_size = shared_buf;
1693         if (hnae3_dev_dcb_supported(hdev)) {
1694                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1695                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1696                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1697         } else {
1698                 buf_alloc->s_buf.self.high = aligned_mps +
1699                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1700                 buf_alloc->s_buf.self.low = aligned_mps;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 if (tc_num)
1705                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1706                 else
1707                         hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / 2;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
1724
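/* hclge_tx_buffer_calc: reserve a fixed TX buffer for every enabled TC,
 * failing when the total packet buffer is too small.
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0 on success, -ENOMEM when the packet buffer is exhausted
 */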
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for all enabled tc */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
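/* hclge_rx_buf_calc_all: compute the RX private buffer size and waterlines
 * for every enabled TC, using larger waterlines when @max is true, and check
 * whether the result still fits into the packet buffer.
 * @hdev: pointer to struct hclge_dev
 * @max: use the larger waterline scheme
 * @buf_alloc: pointer to buffer calculation data
 * @return: true if the resulting layout fits, false otherwise
 */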
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : 256;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1778                 }
1779
1780                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1781         }
1782
1783         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1784 }
1785
1786 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1787                                           struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1790         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1791         int i;
1792
1793         /* let the last TC be cleared first */
1794         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i) &&
1798                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1799                         /* Clear the private buffer of a TC without PFC */
1800                         priv->wl.low = 0;
1801                         priv->wl.high = 0;
1802                         priv->buf_size = 0;
1803                         priv->enable = 0;
1804                         no_pfc_priv_num--;
1805                 }
1806
1807                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1808                     no_pfc_priv_num == 0)
1809                         break;
1810         }
1811
1812         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1813 }
1814
1815 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1816                                         struct hclge_pkt_buf_alloc *buf_alloc)
1817 {
1818         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1819         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1820         int i;
1821
1822         /* let the last TC be cleared first */
1823         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1824                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1825
1826                 if (hdev->hw_tc_map & BIT(i) &&
1827                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1828                         /* Reduce the number of PFC TCs with a private buffer */
1829                         priv->wl.low = 0;
1830                         priv->enable = 0;
1831                         priv->wl.high = 0;
1832                         priv->buf_size = 0;
1833                         pfc_priv_num--;
1834                 }
1835
1836                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837                     pfc_priv_num == 0)
1838                         break;
1839         }
1840
1841         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1842 }
1843
1844 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1845  * @hdev: pointer to struct hclge_dev
1846  * @buf_alloc: pointer to buffer calculation data
1847  * @return: 0: calculation successful, negative: fail
1848  */
1849 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1850                                 struct hclge_pkt_buf_alloc *buf_alloc)
1851 {
1852         /* When DCB is not supported, rx private buffer is not allocated. */
1853         if (!hnae3_dev_dcb_supported(hdev)) {
1854                 u32 rx_all = hdev->pkt_buf_size;
1855
1856                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1857                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858                         return -ENOMEM;
1859
1860                 return 0;
1861         }
1862
1863         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1864                 return 0;
1865
1866         /* try to decrease the buffer size */
1867         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1868                 return 0;
1869
1870         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1871                 return 0;
1872
1873         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1874                 return 0;
1875
1876         return -ENOMEM;
1877 }
1878
1879 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1880                                    struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         struct hclge_rx_priv_buff_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         int i;
1886
1887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1888         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1889
1890         /* Alloc private buffer for each TC */
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1893
1894                 req->buf_num[i] =
1895                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1896                 req->buf_num[i] |=
1897                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1898         }
1899
1900         req->shared_buf =
1901                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1902                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1903
1904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1905         if (ret)
1906                 dev_err(&hdev->pdev->dev,
1907                         "rx private buffer alloc cmd failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
1912 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1913                                    struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915         struct hclge_rx_priv_wl_buf *req;
1916         struct hclge_priv_buf *priv;
1917         struct hclge_desc desc[2];
1918         int i, j;
1919         int ret;
1920
1921         for (i = 0; i < 2; i++) {
1922                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1923                                            false);
1924                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1925
1926                 /* The first descriptor sets the NEXT bit to 1 */
1927                 if (i == 0)
1928                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1929                 else
1930                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1931
1932                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1933                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1934
1935                         priv = &buf_alloc->priv_buf[idx];
1936                         req->tc_wl[j].high =
1937                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].high |=
1939                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                         req->tc_wl[j].low =
1941                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1942                         req->tc_wl[j].low |=
1943                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1944                 }
1945         }
1946
1947         /* Send 2 descriptors at one time */
1948         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1949         if (ret)
1950                 dev_err(&hdev->pdev->dev,
1951                         "rx private waterline config cmd failed %d\n",
1952                         ret);
1953         return ret;
1954 }
1955
1956 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1957                                     struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1960         struct hclge_rx_com_thrd *req;
1961         struct hclge_desc desc[2];
1962         struct hclge_tc_thrd *tc;
1963         int i, j;
1964         int ret;
1965
1966         for (i = 0; i < 2; i++) {
1967                 hclge_cmd_setup_basic_desc(&desc[i],
1968                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1969                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1970
1971                 /* The first descriptor sets the NEXT bit to 1 */
1972                 if (i == 0)
1973                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1974                 else
1975                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1976
1977                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1978                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1979
1980                         req->com_thrd[j].high =
1981                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].high |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                         req->com_thrd[j].low =
1985                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1986                         req->com_thrd[j].low |=
1987                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1988                 }
1989         }
1990
1991         /* Send 2 descriptors at one time */
1992         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1993         if (ret)
1994                 dev_err(&hdev->pdev->dev,
1995                         "common threshold config cmd failed %d\n", ret);
1996         return ret;
1997 }
1998
1999 static int hclge_common_wl_config(struct hclge_dev *hdev,
2000                                   struct hclge_pkt_buf_alloc *buf_alloc)
2001 {
2002         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2003         struct hclge_rx_com_wl *req;
2004         struct hclge_desc desc;
2005         int ret;
2006
2007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2008
2009         req = (struct hclge_rx_com_wl *)desc.data;
2010         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2011         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012
2013         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2014         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2015
2016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2017         if (ret)
2018                 dev_err(&hdev->pdev->dev,
2019                         "common waterline config cmd failed %d\n", ret);
2020
2021         return ret;
2022 }
2023
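/* hclge_buffer_alloc: calculate and program the whole packet buffer layout:
 * TX buffers, RX private buffers and, on DCB capable devices, the private
 * waterlines and common thresholds, followed by the common waterline.
 * @hdev: pointer to struct hclge_dev
 * @return: 0 on success, negative errno on failure
 */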
2024 int hclge_buffer_alloc(struct hclge_dev *hdev)
2025 {
2026         struct hclge_pkt_buf_alloc *pkt_buf;
2027         int ret;
2028
2029         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2030         if (!pkt_buf)
2031                 return -ENOMEM;
2032
2033         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc tx buffer size for all TCs %d\n", ret);
2037                 goto out;
2038         }
2039
2040         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2041         if (ret) {
2042                 dev_err(&hdev->pdev->dev,
2043                         "could not alloc tx buffers %d\n", ret);
2044                 goto out;
2045         }
2046
2047         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2048         if (ret) {
2049                 dev_err(&hdev->pdev->dev,
2050                         "could not calc rx priv buffer size for all TCs %d\n",
2051                         ret);
2052                 goto out;
2053         }
2054
2055         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058                         ret);
2059                 goto out;
2060         }
2061
2062         if (hnae3_dev_dcb_supported(hdev)) {
2063                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2064                 if (ret) {
2065                         dev_err(&hdev->pdev->dev,
2066                                 "could not configure rx private waterline %d\n",
2067                                 ret);
2068                         goto out;
2069                 }
2070
2071                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2072                 if (ret) {
2073                         dev_err(&hdev->pdev->dev,
2074                                 "could not configure common threshold %d\n",
2075                                 ret);
2076                         goto out;
2077                 }
2078         }
2079
2080         ret = hclge_common_wl_config(hdev, pkt_buf);
2081         if (ret)
2082                 dev_err(&hdev->pdev->dev,
2083                         "could not configure common waterline %d\n", ret);
2084
2085 out:
2086         kfree(pkt_buf);
2087         return ret;
2088 }
2089
2090 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2091 {
2092         struct hnae3_handle *roce = &vport->roce;
2093         struct hnae3_handle *nic = &vport->nic;
2094
2095         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2096
2097         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098             vport->back->num_msi_left == 0)
2099                 return -EINVAL;
2100
2101         roce->rinfo.base_vector = vport->back->roce_base_vector;
2102
2103         roce->rinfo.netdev = nic->kinfo.netdev;
2104         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2105
2106         roce->pdev = nic->pdev;
2107         roce->ae_algo = nic->ae_algo;
2108         roce->numa_node_mask = nic->numa_node_mask;
2109
2110         return 0;
2111 }
2112
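/* hclge_init_msi: allocate MSI/MSI-X vectors for the PF and the bookkeeping
 * arrays used to map vectors to vports; the RoCE base vector sits at the
 * configured offset from the first MSI vector.
 * @hdev: pointer to struct hclge_dev
 * @return: 0 on success, negative errno on failure
 */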
2113 static int hclge_init_msi(struct hclge_dev *hdev)
2114 {
2115         struct pci_dev *pdev = hdev->pdev;
2116         int vectors;
2117         int i;
2118
2119         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2120                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2121         if (vectors < 0) {
2122                 dev_err(&pdev->dev,
2123                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2124                         vectors);
2125                 return vectors;
2126         }
2127         if (vectors < hdev->num_msi)
2128                 dev_warn(&hdev->pdev->dev,
2129                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2130                          hdev->num_msi, vectors);
2131
2132         hdev->num_msi = vectors;
2133         hdev->num_msi_left = vectors;
2134         hdev->base_msi_vector = pdev->irq;
2135         hdev->roce_base_vector = hdev->base_msi_vector +
2136                                 hdev->roce_base_msix_offset;
2137
2138         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2139                                            sizeof(u16), GFP_KERNEL);
2140         if (!hdev->vector_status) {
2141                 pci_free_irq_vectors(pdev);
2142                 return -ENOMEM;
2143         }
2144
2145         for (i = 0; i < hdev->num_msi; i++)
2146                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2147
2148         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2149                                         sizeof(int), GFP_KERNEL);
2150         if (!hdev->vector_irq) {
2151                 pci_free_irq_vectors(pdev);
2152                 return -ENOMEM;
2153         }
2154
2155         return 0;
2156 }
2157
2158 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2159 {
2161         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2162                 duplex = HCLGE_MAC_FULL;
2163
2164         return duplex;
2165 }
2166
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2168                                       u8 duplex)
2169 {
2170         struct hclge_config_mac_speed_dup_cmd *req;
2171         struct hclge_desc desc;
2172         int ret;
2173
2174         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2175
2176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2177
2178         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2179
2180         switch (speed) {
2181         case HCLGE_MAC_SPEED_10M:
2182                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183                                 HCLGE_CFG_SPEED_S, 6);
2184                 break;
2185         case HCLGE_MAC_SPEED_100M:
2186                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187                                 HCLGE_CFG_SPEED_S, 7);
2188                 break;
2189         case HCLGE_MAC_SPEED_1G:
2190                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191                                 HCLGE_CFG_SPEED_S, 0);
2192                 break;
2193         case HCLGE_MAC_SPEED_10G:
2194                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195                                 HCLGE_CFG_SPEED_S, 1);
2196                 break;
2197         case HCLGE_MAC_SPEED_25G:
2198                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199                                 HCLGE_CFG_SPEED_S, 2);
2200                 break;
2201         case HCLGE_MAC_SPEED_40G:
2202                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203                                 HCLGE_CFG_SPEED_S, 3);
2204                 break;
2205         case HCLGE_MAC_SPEED_50G:
2206                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207                                 HCLGE_CFG_SPEED_S, 4);
2208                 break;
2209         case HCLGE_MAC_SPEED_100G:
2210                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211                                 HCLGE_CFG_SPEED_S, 5);
2212                 break;
2213         default:
2214                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215                 return -EINVAL;
2216         }
2217
2218         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2219                       1);
2220
2221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222         if (ret) {
2223                 dev_err(&hdev->pdev->dev,
2224                         "mac speed/duplex config cmd failed %d.\n", ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 {
2233         int ret;
2234
2235         duplex = hclge_check_speed_dup(duplex, speed);
2236         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2237                 return 0;
2238
2239         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240         if (ret)
2241                 return ret;
2242
2243         hdev->hw.mac.speed = speed;
2244         hdev->hw.mac.duplex = duplex;
2245
2246         return 0;
2247 }
2248
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2250                                      u8 duplex)
2251 {
2252         struct hclge_vport *vport = hclge_get_vport(handle);
2253         struct hclge_dev *hdev = vport->back;
2254
2255         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2256 }
2257
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2259 {
2260         struct hclge_config_auto_neg_cmd *req;
2261         struct hclge_desc desc;
2262         u32 flag = 0;
2263         int ret;
2264
2265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2266
2267         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2270
2271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2272         if (ret)
2273                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274                         ret);
2275
2276         return ret;
2277 }
2278
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2280 {
2281         struct hclge_vport *vport = hclge_get_vport(handle);
2282         struct hclge_dev *hdev = vport->back;
2283
2284         if (!hdev->hw.mac.support_autoneg) {
2285                 if (enable) {
2286                         dev_err(&hdev->pdev->dev,
2287                                 "autoneg is not supported by current port\n");
2288                         return -EOPNOTSUPP;
2289                 } else {
2290                         return 0;
2291                 }
2292         }
2293
2294         return hclge_set_autoneg_en(hdev, enable);
2295 }
2296
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2298 {
2299         struct hclge_vport *vport = hclge_get_vport(handle);
2300         struct hclge_dev *hdev = vport->back;
2301         struct phy_device *phydev = hdev->hw.mac.phydev;
2302
2303         if (phydev)
2304                 return phydev->autoneg;
2305
2306         return hdev->hw.mac.autoneg;
2307 }
2308
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2310 {
2311         struct hclge_vport *vport = hclge_get_vport(handle);
2312         struct hclge_dev *hdev = vport->back;
2313         int ret;
2314
2315         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2316
2317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2318         if (ret)
2319                 return ret;
2320         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2321 }
2322
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2324 {
2325         struct hclge_config_fec_cmd *req;
2326         struct hclge_desc desc;
2327         int ret;
2328
2329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2330
2331         req = (struct hclge_config_fec_cmd *)desc.data;
2332         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334         if (fec_mode & BIT(HNAE3_FEC_RS))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337         if (fec_mode & BIT(HNAE3_FEC_BASER))
2338                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2340
2341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2342         if (ret)
2343                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344
2345         return ret;
2346 }
2347
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2349 {
2350         struct hclge_vport *vport = hclge_get_vport(handle);
2351         struct hclge_dev *hdev = vport->back;
2352         struct hclge_mac *mac = &hdev->hw.mac;
2353         int ret;
2354
2355         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357                 return -EINVAL;
2358         }
2359
2360         ret = hclge_set_fec_hw(hdev, fec_mode);
2361         if (ret)
2362                 return ret;
2363
2364         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365         return 0;
2366 }
2367
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2369                           u8 *fec_mode)
2370 {
2371         struct hclge_vport *vport = hclge_get_vport(handle);
2372         struct hclge_dev *hdev = vport->back;
2373         struct hclge_mac *mac = &hdev->hw.mac;
2374
2375         if (fec_ability)
2376                 *fec_ability = mac->fec_ability;
2377         if (fec_mode)
2378                 *fec_mode = mac->fec_mode;
2379 }
2380
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2382 {
2383         struct hclge_mac *mac = &hdev->hw.mac;
2384         int ret;
2385
2386         hdev->support_sfp_query = true;
2387         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389                                          hdev->hw.mac.duplex);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "Config mac speed dup fail ret=%d\n", ret);
2393                 return ret;
2394         }
2395
2396         mac->link = 0;
2397
2398         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2400                 if (ret) {
2401                         dev_err(&hdev->pdev->dev,
2402                                 "Fec mode init fail, ret = %d\n", ret);
2403                         return ret;
2404                 }
2405         }
2406
2407         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2408         if (ret) {
2409                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410                 return ret;
2411         }
2412
2413         ret = hclge_buffer_alloc(hdev);
2414         if (ret)
2415                 dev_err(&hdev->pdev->dev,
2416                         "allocate buffer fail, ret=%d\n", ret);
2417
2418         return ret;
2419 }
2420
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425                 schedule_work(&hdev->mbx_service_task);
2426 }
2427
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2429 {
2430         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431                 schedule_work(&hdev->rst_service_task);
2432 }
2433
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2435 {
2436         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439                 (void)schedule_work(&hdev->service_task);
2440 }
2441
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2443 {
2444         struct hclge_link_status_cmd *req;
2445         struct hclge_desc desc;
2446         int link_status;
2447         int ret;
2448
2449         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2451         if (ret) {
2452                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2453                         ret);
2454                 return ret;
2455         }
2456
2457         req = (struct hclge_link_status_cmd *)desc.data;
2458         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2459
2460         return !!link_status;
2461 }
2462
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2464 {
2465         int mac_state;
2466         int link_stat;
2467
2468         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2469                 return 0;
2470
2471         mac_state = hclge_get_mac_link_status(hdev);
2472
2473         if (hdev->hw.mac.phydev) {
2474                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475                         link_stat = mac_state &
2476                                 hdev->hw.mac.phydev->link;
2477                 else
2478                         link_stat = 0;
2479
2480         } else {
2481                 link_stat = mac_state;
2482         }
2483
2484         return !!link_stat;
2485 }
2486
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2488 {
2489         struct hnae3_client *rclient = hdev->roce_client;
2490         struct hnae3_client *client = hdev->nic_client;
2491         struct hnae3_handle *rhandle;
2492         struct hnae3_handle *handle;
2493         int state;
2494         int i;
2495
2496         if (!client)
2497                 return;
2498         state = hclge_get_mac_phy_link(hdev);
2499         if (state != hdev->hw.mac.link) {
2500                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501                         handle = &hdev->vport[i].nic;
2502                         client->ops->link_status_change(handle, state);
2503                         hclge_config_mac_tnl_int(hdev, state);
2504                         rhandle = &hdev->vport[i].roce;
2505                         if (rclient && rclient->ops->link_status_change)
2506                                 rclient->ops->link_status_change(rhandle,
2507                                                                  state);
2508                 }
2509                 hdev->hw.mac.link = state;
2510         }
2511 }
2512
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2514 {
2515         /* update fec ability by speed */
2516         hclge_convert_setting_fec(mac);
2517
2518         /* firmware cannot identify the backplane type, the media type
2519          * read from the configuration can help handle it
2520          */
2521         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2526
2527         if (mac->support_autoneg) {
2528                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529                 linkmode_copy(mac->advertising, mac->supported);
2530         } else {
2531                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2532                                    mac->supported);
2533                 linkmode_zero(mac->advertising);
2534         }
2535 }
2536
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2538 {
2539         struct hclge_sfp_info_cmd *resp = NULL;
2540         struct hclge_desc desc;
2541         int ret;
2542
2543         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544         resp = (struct hclge_sfp_info_cmd *)desc.data;
2545         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546         if (ret == -EOPNOTSUPP) {
2547                 dev_warn(&hdev->pdev->dev,
2548                          "IMP does not support getting SFP speed %d\n", ret);
2549                 return ret;
2550         } else if (ret) {
2551                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2552                 return ret;
2553         }
2554
2555         *speed = le32_to_cpu(resp->speed);
2556
2557         return 0;
2558 }
2559
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2561 {
2562         struct hclge_sfp_info_cmd *resp;
2563         struct hclge_desc desc;
2564         int ret;
2565
2566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567         resp = (struct hclge_sfp_info_cmd *)desc.data;
2568
2569         resp->query_type = QUERY_ACTIVE_SPEED;
2570
2571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572         if (ret == -EOPNOTSUPP) {
2573                 dev_warn(&hdev->pdev->dev,
2574                          "IMP does not support getting SFP info %d\n", ret);
2575                 return ret;
2576         } else if (ret) {
2577                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2578                 return ret;
2579         }
2580
2581         mac->speed = le32_to_cpu(resp->speed);
2582         /* if resp->speed_ability is 0, it means the firmware is an old
2583          * version, so do not update these params
2584          */
2585         if (resp->speed_ability) {
2586                 mac->module_type = le32_to_cpu(resp->module_type);
2587                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588                 mac->autoneg = resp->autoneg;
2589                 mac->support_autoneg = resp->autoneg_ability;
2590                 if (!resp->active_fec)
2591                         mac->fec_mode = 0;
2592                 else
2593                         mac->fec_mode = BIT(resp->active_fec);
2594         } else {
2595                 mac->speed_type = QUERY_SFP_SPEED;
2596         }
2597
2598         return 0;
2599 }
2600
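/* hclge_update_port_info: for non-copper ports, query the current speed (and
 * on newer revisions the full SFP info) from firmware and reconfigure the
 * MAC accordingly; disables further SFP queries when firmware lacks support.
 * @hdev: pointer to struct hclge_dev
 * @return: 0 on success, negative errno on failure
 */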
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2602 {
2603         struct hclge_mac *mac = &hdev->hw.mac;
2604         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2605         int ret;
2606
2607         /* get the port info from SFP cmd if not copper port */
2608         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2609                 return 0;
2610
2611         /* if IMP does not support get SFP/qSFP info, return directly */
2612         if (!hdev->support_sfp_query)
2613                 return 0;
2614
2615         if (hdev->pdev->revision >= 0x21)
2616                 ret = hclge_get_sfp_info(hdev, mac);
2617         else
2618                 ret = hclge_get_sfp_speed(hdev, &speed);
2619
2620         if (ret == -EOPNOTSUPP) {
2621                 hdev->support_sfp_query = false;
2622                 return ret;
2623         } else if (ret) {
2624                 return ret;
2625         }
2626
2627         if (hdev->pdev->revision >= 0x21) {
2628                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629                         hclge_update_port_capability(mac);
2630                         return 0;
2631                 }
2632                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2633                                                HCLGE_MAC_FULL);
2634         } else {
2635                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636                         return 0; /* do nothing if no SFP */
2637
2638                 /* must config full duplex for SFP */
2639                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2640         }
2641 }
2642
2643 static int hclge_get_status(struct hnae3_handle *handle)
2644 {
2645         struct hclge_vport *vport = hclge_get_vport(handle);
2646         struct hclge_dev *hdev = vport->back;
2647
2648         hclge_update_link_status(hdev);
2649
2650         return hdev->hw.mac.link;
2651 }
2652
2653 static void hclge_service_timer(struct timer_list *t)
2654 {
2655         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2656
2657         mod_timer(&hdev->service_timer, jiffies + HZ);
2658         hdev->hw_stats.stats_timer++;
2659         hdev->fd_arfs_expire_timer++;
2660         hclge_task_schedule(hdev);
2661 }
2662
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2664 {
2665         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2666
2667         /* Flush memory before next watchdog */
2668         smp_mb__before_atomic();
2669         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2670 }
2671
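/* hclge_check_event_cause: read the vector0 interrupt source registers and
 * classify the pending event (IMP/global/core reset, MSI-X error, mailbox
 * or other); for reset and mailbox events the bits needed to clear the
 * source are returned through @clearval.
 * @hdev: pointer to struct hclge_dev
 * @clearval: out parameter, value to write back to clear the event
 * @return: one of the HCLGE_VECTOR0_EVENT_* values
 */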
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2673 {
2674         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2675
2676         /* fetch the events from their corresponding regs */
2677         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679         msix_src_reg = hclge_read_dev(&hdev->hw,
2680                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2681
2682         /* Assumption: If by any chance reset and mailbox events are reported
2683          * together then we will only process the reset event in this go and
2684          * will defer the processing of the mailbox events. Since we will not
2685          * have cleared the RX CMDQ event this time, we will receive another
2686          * interrupt from H/W just for the mailbox.
2687          */
2688
2689         /* check for vector0 reset event sources */
2690         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695                 hdev->rst_stats.imp_rst_cnt++;
2696                 return HCLGE_VECTOR0_EVENT_RST;
2697         }
2698
2699         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704                 hdev->rst_stats.global_rst_cnt++;
2705                 return HCLGE_VECTOR0_EVENT_RST;
2706         }
2707
2708         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713                 hdev->rst_stats.core_rst_cnt++;
2714                 return HCLGE_VECTOR0_EVENT_RST;
2715         }
2716
2717         /* check for vector0 msix event source */
2718         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2720                         msix_src_reg);
2721                 return HCLGE_VECTOR0_EVENT_ERR;
2722         }
2723
2724         /* check for vector0 mailbox(=CMDQ RX) event source */
2725         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727                 *clearval = cmdq_src_reg;
2728                 return HCLGE_VECTOR0_EVENT_MBX;
2729         }
2730
2731         /* print other vector0 event source */
2732         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733                 cmdq_src_reg, msix_src_reg);
2734         return HCLGE_VECTOR0_EVENT_OTHER;
2735 }
2736
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2738                                     u32 regclr)
2739 {
2740         switch (event_type) {
2741         case HCLGE_VECTOR0_EVENT_RST:
2742                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2743                 break;
2744         case HCLGE_VECTOR0_EVENT_MBX:
2745                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2746                 break;
2747         default:
2748                 break;
2749         }
2750 }
2751
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2753 {
2754         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2759 }
2760
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2762 {
2763         writel(enable ? 1 : 0, vector->addr);
2764 }
2765
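     /* Handler for the misc (vector 0) interrupt: disable the vector, decode
      * the event cause (reset, mailbox, MSI-X error or other) and schedule
      * the matching service task. The vector is re-enabled here only for
      * mailbox events; for reset and error events it is re-enabled later,
      * once the event cause has actually been cleared.
      */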
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2767 {
2768         struct hclge_dev *hdev = data;
2769         u32 event_cause;
2770         u32 clearval;
2771
2772         hclge_enable_vector(&hdev->misc_vector, false);
2773         event_cause = hclge_check_event_cause(hdev, &clearval);
2774
2775         /* vector 0 interrupt is shared with reset and mailbox source events. */
2776         switch (event_cause) {
2777         case HCLGE_VECTOR0_EVENT_ERR:
2778                 /* we do not know what type of reset is required now. This could
2779                  * only be decided after we fetch the type of errors which
2780                  * caused this event. Therefore, we will do the following for now:
2781                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782                  *    have deferred the decision on which reset type to use.
2783                  * 2. Schedule the reset service task.
2784                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2785                  *    will fetch the correct type of reset. This would be done
2786                  *    by first decoding the types of errors.
2787                  */
2788                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2789                 /* fall through */
2790         case HCLGE_VECTOR0_EVENT_RST:
2791                 hclge_reset_task_schedule(hdev);
2792                 break;
2793         case HCLGE_VECTOR0_EVENT_MBX:
2794                 /* If we are here then,
2795                  * 1. Either we are not handling any mbx task and one is not
2796                  *    scheduled either
2797                  *                        OR
2798                  * 2. We could be handling an mbx task but nothing more is
2799                  *    scheduled.
2800                  * In both cases, we should schedule the mbx task as there are
2801                  * more mbx messages reported by this interrupt.
2802                  */
2803                 hclge_mbx_task_schedule(hdev);
2804                 break;
2805         default:
2806                 dev_warn(&hdev->pdev->dev,
2807                          "received unknown or unhandled event of vector0\n");
2808                 break;
2809         }
2810
2811         /* clear the source of interrupt if it is not caused by reset */
2812         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813                 hclge_clear_event_cause(hdev, event_cause, clearval);
2814                 hclge_enable_vector(&hdev->misc_vector, true);
2815         }
2816
2817         return IRQ_HANDLED;
2818 }
2819
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2821 {
2822         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823                 dev_warn(&hdev->pdev->dev,
2824                          "vector(vector_id %d) has been freed.\n", vector_id);
2825                 return;
2826         }
2827
2828         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829         hdev->num_msi_left += 1;
2830         hdev->num_msi_used -= 1;
2831 }
2832
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2834 {
2835         struct hclge_misc_vector *vector = &hdev->misc_vector;
2836
2837         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2838
2839         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840         hdev->vector_status[0] = 0;
2841
2842         hdev->num_msi_left -= 1;
2843         hdev->num_msi_used += 1;
2844 }
2845
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2847 {
2848         int ret;
2849
2850         hclge_get_misc_vector(hdev);
2851
2852         /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2853         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854                           0, "hclge_misc", hdev);
2855         if (ret) {
2856                 hclge_free_vector(hdev, 0);
2857                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858                         hdev->misc_vector.vector_irq);
2859         }
2860
2861         return ret;
2862 }
2863
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2865 {
2866         free_irq(hdev->misc_vector.vector_irq, hdev);
2867         hclge_free_vector(hdev, 0);
2868 }
2869
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871                         enum hnae3_reset_notify_type type)
2872 {
2873         struct hnae3_client *client = hdev->nic_client;
2874         u16 i;
2875
2876         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2877             !client)
2878                 return 0;
2879
2880         if (!client->ops->reset_notify)
2881                 return -EOPNOTSUPP;
2882
2883         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2884                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2885                 int ret;
2886
2887                 ret = client->ops->reset_notify(handle, type);
2888                 if (ret) {
2889                         dev_err(&hdev->pdev->dev,
2890                                 "notify nic client failed %d(%d)\n", type, ret);
2891                         return ret;
2892                 }
2893         }
2894
2895         return 0;
2896 }
2897
2898 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2899                                     enum hnae3_reset_notify_type type)
2900 {
2901         struct hnae3_client *client = hdev->roce_client;
2902         int ret = 0;
2903         u16 i;
2904
2905         if (!client)
2906                 return 0;
2907
2908         if (!client->ops->reset_notify)
2909                 return -EOPNOTSUPP;
2910
2911         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2912                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2913
2914                 ret = client->ops->reset_notify(handle, type);
2915                 if (ret) {
2916                         dev_err(&hdev->pdev->dev,
2917                                 "notify roce client failed %d(%d)\n",
2918                                 type, ret);
2919                         return ret;
2920                 }
2921         }
2922
2923         return ret;
2924 }
2925
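     /* Wait for the hardware to finish the current reset: for FLR, poll the
      * HNAE3_FLR_DONE flag; for other reset types, poll the relevant status
      * bit of the reset register. Each iteration sleeps 100 ms, for at most
      * 200 iterations (about 20 seconds); -EBUSY is returned on timeout.
      */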
2926 static int hclge_reset_wait(struct hclge_dev *hdev)
2927 {
2928 #define HCLGE_RESET_WAIT_MS     100
2929 #define HCLGE_RESET_WAIT_CNT    200
2930         u32 val, reg, reg_bit;
2931         u32 cnt = 0;
2932
2933         switch (hdev->reset_type) {
2934         case HNAE3_IMP_RESET:
2935                 reg = HCLGE_GLOBAL_RESET_REG;
2936                 reg_bit = HCLGE_IMP_RESET_BIT;
2937                 break;
2938         case HNAE3_GLOBAL_RESET:
2939                 reg = HCLGE_GLOBAL_RESET_REG;
2940                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2941                 break;
2942         case HNAE3_CORE_RESET:
2943                 reg = HCLGE_GLOBAL_RESET_REG;
2944                 reg_bit = HCLGE_CORE_RESET_BIT;
2945                 break;
2946         case HNAE3_FUNC_RESET:
2947                 reg = HCLGE_FUN_RST_ING;
2948                 reg_bit = HCLGE_FUN_RST_ING_B;
2949                 break;
2950         case HNAE3_FLR_RESET:
2951                 break;
2952         default:
2953                 dev_err(&hdev->pdev->dev,
2954                         "Wait for unsupported reset type: %d\n",
2955                         hdev->reset_type);
2956                 return -EINVAL;
2957         }
2958
2959         if (hdev->reset_type == HNAE3_FLR_RESET) {
2960                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2961                        cnt++ < HCLGE_RESET_WAIT_CNT)
2962                         msleep(HCLGE_RESET_WAIT_MS);
2963
2964                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2965                         dev_err(&hdev->pdev->dev,
2966                                 "flr wait timeout: %d\n", cnt);
2967                         return -EBUSY;
2968                 }
2969
2970                 return 0;
2971         }
2972
2973         val = hclge_read_dev(&hdev->hw, reg);
2974         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2975                 msleep(HCLGE_RESET_WAIT_MS);
2976                 val = hclge_read_dev(&hdev->hw, reg);
2977                 cnt++;
2978         }
2979
2980         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2981                 dev_warn(&hdev->pdev->dev,
2982                          "Wait for reset timeout: %d\n", hdev->reset_type);
2983                 return -EBUSY;
2984         }
2985
2986         return 0;
2987 }
2988
2989 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2990 {
2991         struct hclge_vf_rst_cmd *req;
2992         struct hclge_desc desc;
2993
2994         req = (struct hclge_vf_rst_cmd *)desc.data;
2995         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2996         req->dest_vfid = func_id;
2997
2998         if (reset)
2999                 req->vf_rst = 0x1;
3000
3001         return hclge_cmd_send(&hdev->hw, &desc, 1);
3002 }
3003
3004 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3005 {
3006         int i;
3007
3008         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3009                 struct hclge_vport *vport = &hdev->vport[i];
3010                 int ret;
3011
3012                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3013                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3014                 if (ret) {
3015                         dev_err(&hdev->pdev->dev,
3016                                 "set vf(%d) rst failed %d!\n",
3017                                 vport->vport_id, ret);
3018                         return ret;
3019                 }
3020
3021                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3022                         continue;
3023
3024                 /* Inform VF to process the reset.
3025                  * hclge_inform_reset_assert_to_vf may fail if VF
3026                  * driver is not loaded.
3027                  */
3028                 ret = hclge_inform_reset_assert_to_vf(vport);
3029                 if (ret)
3030                         dev_warn(&hdev->pdev->dev,
3031                                  "inform reset to vf(%d) failed %d!\n",
3032                                  vport->vport_id, ret);
3033         }
3034
3035         return 0;
3036 }
3037
3038 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3039 {
3040         struct hclge_desc desc;
3041         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3042         int ret;
3043
3044         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3045         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3046         req->fun_reset_vfid = func_id;
3047
3048         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3049         if (ret)
3050                 dev_err(&hdev->pdev->dev,
3051                         "send function reset cmd fail, status =%d\n", ret);
3052
3053         return ret;
3054 }
3055
3056 static void hclge_do_reset(struct hclge_dev *hdev)
3057 {
3058         struct hnae3_handle *handle = &hdev->vport[0].nic;
3059         struct pci_dev *pdev = hdev->pdev;
3060         u32 val;
3061
3062         if (hclge_get_hw_reset_stat(handle)) {
3063                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3064                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3065                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3066                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3067                 return;
3068         }
3069
3070         switch (hdev->reset_type) {
3071         case HNAE3_GLOBAL_RESET:
3072                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3073                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3074                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3075                 dev_info(&pdev->dev, "Global Reset requested\n");
3076                 break;
3077         case HNAE3_CORE_RESET:
3078                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3079                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3080                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3081                 dev_info(&pdev->dev, "Core Reset requested\n");
3082                 break;
3083         case HNAE3_FUNC_RESET:
3084                 dev_info(&pdev->dev, "PF Reset requested\n");
3085                 /* schedule again to check later */
3086                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3087                 hclge_reset_task_schedule(hdev);
3088                 break;
3089         case HNAE3_FLR_RESET:
3090                 dev_info(&pdev->dev, "FLR requested\n");
3091                 /* schedule again to check later */
3092                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3093                 hclge_reset_task_schedule(hdev);
3094                 break;
3095         default:
3096                 dev_warn(&pdev->dev,
3097                          "Unsupported reset type: %d\n", hdev->reset_type);
3098                 break;
3099         }
3100 }
3101
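     /* Return the highest priority reset level pending in *addr and clear
      * that bit together with all lower priority ones. Any HNAE3_UNKNOWN_RESET
      * request is first resolved into a concrete level via the MSI-X error
      * handler. If the resulting level is less severe than a reset already in
      * progress, HNAE3_NONE_RESET is returned instead.
      */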
3102 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3103                                                    unsigned long *addr)
3104 {
3105         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3106
3107         /* first, resolve any unknown reset type to the known type(s) */
3108         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3109                 /* we will intentionally ignore any errors from this function
3110                  * as we will end up in *some* reset request in any case
3111                  */
3112                 hclge_handle_hw_msix_error(hdev, addr);
3113                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3114                 /* We deferred the clearing of the error event which caused
3115                  * the interrupt since it was not possible to do that in
3116                  * interrupt context (and this is the reason we introduced
3117                  * the new UNKNOWN reset type). Now that the errors have been
3118                  * handled and cleared in hardware, we can safely enable
3119                  * interrupts. This is an exception to the norm.
3120                  */
3121                 hclge_enable_vector(&hdev->misc_vector, true);
3122         }
3123
3124         /* return the highest priority reset level amongst all */
3125         if (test_bit(HNAE3_IMP_RESET, addr)) {
3126                 rst_level = HNAE3_IMP_RESET;
3127                 clear_bit(HNAE3_IMP_RESET, addr);
3128                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3129                 clear_bit(HNAE3_CORE_RESET, addr);
3130                 clear_bit(HNAE3_FUNC_RESET, addr);
3131         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3132                 rst_level = HNAE3_GLOBAL_RESET;
3133                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3134                 clear_bit(HNAE3_CORE_RESET, addr);
3135                 clear_bit(HNAE3_FUNC_RESET, addr);
3136         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3137                 rst_level = HNAE3_CORE_RESET;
3138                 clear_bit(HNAE3_CORE_RESET, addr);
3139                 clear_bit(HNAE3_FUNC_RESET, addr);
3140         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3141                 rst_level = HNAE3_FUNC_RESET;
3142                 clear_bit(HNAE3_FUNC_RESET, addr);
3143         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3144                 rst_level = HNAE3_FLR_RESET;
3145                 clear_bit(HNAE3_FLR_RESET, addr);
3146         }
3147
3148         if (hdev->reset_type != HNAE3_NONE_RESET &&
3149             rst_level < hdev->reset_type)
3150                 return HNAE3_NONE_RESET;
3151
3152         return rst_level;
3153 }
3154
3155 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3156 {
3157         u32 clearval = 0;
3158
3159         switch (hdev->reset_type) {
3160         case HNAE3_IMP_RESET:
3161                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3162                 break;
3163         case HNAE3_GLOBAL_RESET:
3164                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3165                 break;
3166         case HNAE3_CORE_RESET:
3167                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3168                 break;
3169         default:
3170                 break;
3171         }
3172
3173         if (!clearval)
3174                 return;
3175
3176         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3177         hclge_enable_vector(&hdev->misc_vector, true);
3178 }
3179
3180 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3181 {
3182         int ret = 0;
3183
3184         switch (hdev->reset_type) {
3185         case HNAE3_FUNC_RESET:
3186                 /* fall through */
3187         case HNAE3_FLR_RESET:
3188                 ret = hclge_set_all_vf_rst(hdev, true);
3189                 break;
3190         default:
3191                 break;
3192         }
3193
3194         return ret;
3195 }
3196
3197 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3198 {
3199         u32 reg_val;
3200         int ret = 0;
3201
3202         switch (hdev->reset_type) {
3203         case HNAE3_FUNC_RESET:
3204                 /* There is no mechanism for the PF to know if the VF has
3205                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3206                  */
3207                 msleep(100);
3208                 ret = hclge_func_reset_cmd(hdev, 0);
3209                 if (ret) {
3210                         dev_err(&hdev->pdev->dev,
3211                                 "asserting function reset fail %d!\n", ret);
3212                         return ret;
3213                 }
3214
3215                 /* After performing PF reset, it is not necessary to do the
3216                  * mailbox handling or send any command to firmware, because
3217                  * any mailbox handling or command to firmware is only valid
3218                  * after hclge_cmd_init is called.
3219                  */
3220                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3221                 hdev->rst_stats.pf_rst_cnt++;
3222                 break;
3223         case HNAE3_FLR_RESET:
3224                 /* There is no mechanism for the PF to know if the VF has
3225                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3226                  */
3227                 msleep(100);
3228                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3229                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3230                 hdev->rst_stats.flr_rst_cnt++;
3231                 break;
3232         case HNAE3_IMP_RESET:
3233                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3234                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3235                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3236                 break;
3237         default:
3238                 break;
3239         }
3240
3241         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3242
3243         return ret;
3244 }
3245
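     /* Decide how to proceed after a failed reset attempt. Returns true when
      * the reset task should be rescheduled (another reset is still pending,
      * or the hardware wait timed out within the retry budget); returns false
      * when a pending IMP reset takes over, when the reset level is upgraded
      * via the reset timer, or when the reset is finally given up.
      */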
3246 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3247 {
3248 #define MAX_RESET_FAIL_CNT 5
3249 #define RESET_UPGRADE_DELAY_SEC 10
3250
3251         if (hdev->reset_pending) {
3252                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3253                          hdev->reset_pending);
3254                 return true;
3255         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3256                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3257                     BIT(HCLGE_IMP_RESET_BIT))) {
3258                 dev_info(&hdev->pdev->dev,
3259                          "reset failed because IMP Reset is pending\n");
3260                 hclge_clear_reset_cause(hdev);
3261                 return false;
3262         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3263                 hdev->reset_fail_cnt++;
3264                 if (is_timeout) {
3265                         set_bit(hdev->reset_type, &hdev->reset_pending);
3266                         dev_info(&hdev->pdev->dev,
3267                                  "re-schedule to wait for hw reset done\n");
3268                         return true;
3269                 }
3270
3271                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3272                 hclge_clear_reset_cause(hdev);
3273                 mod_timer(&hdev->reset_timer,
3274                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3275
3276                 return false;
3277         }
3278
3279         hclge_clear_reset_cause(hdev);
3280         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3281         return false;
3282 }
3283
3284 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3285 {
3286         int ret = 0;
3287
3288         switch (hdev->reset_type) {
3289         case HNAE3_FUNC_RESET:
3290                 /* fall through */
3291         case HNAE3_FLR_RESET:
3292                 ret = hclge_set_all_vf_rst(hdev, false);
3293                 break;
3294         default:
3295                 break;
3296         }
3297
3298         return ret;
3299 }
3300
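     /* Perform the full reset sequence for hdev->reset_type: bring the RoCE
      * and NIC clients down, assert the reset and wait for hardware to finish,
      * re-initialize the ae device, then notify the clients to uninit, init,
      * restore and come back up. On failure, hclge_reset_err_handle() decides
      * whether the reset task is rescheduled.
      */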
3301 static void hclge_reset(struct hclge_dev *hdev)
3302 {
3303         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3304         bool is_timeout = false;
3305         int ret;
3306
3307         /* Initialize ae_dev reset status as well, in case the enet layer wants
3308          * to know if the device is undergoing reset
3309          */
3310         ae_dev->reset_type = hdev->reset_type;
3311         hdev->rst_stats.reset_cnt++;
3312         /* perform reset of the stack & ae device for a client */
3313         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3314         if (ret)
3315                 goto err_reset;
3316
3317         ret = hclge_reset_prepare_down(hdev);
3318         if (ret)
3319                 goto err_reset;
3320
3321         rtnl_lock();
3322         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3323         if (ret)
3324                 goto err_reset_lock;
3325
3326         rtnl_unlock();
3327
3328         ret = hclge_reset_prepare_wait(hdev);
3329         if (ret)
3330                 goto err_reset;
3331
3332         if (hclge_reset_wait(hdev)) {
3333                 is_timeout = true;
3334                 goto err_reset;
3335         }
3336
3337         hdev->rst_stats.hw_reset_done_cnt++;
3338
3339         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3340         if (ret)
3341                 goto err_reset;
3342
3343         rtnl_lock();
3344         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3345         if (ret)
3346                 goto err_reset_lock;
3347
3348         ret = hclge_reset_ae_dev(hdev->ae_dev);
3349         if (ret)
3350                 goto err_reset_lock;
3351
3352         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3353         if (ret)
3354                 goto err_reset_lock;
3355
3356         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3357         if (ret)
3358                 goto err_reset_lock;
3359
3360         hclge_clear_reset_cause(hdev);
3361
3362         ret = hclge_reset_prepare_up(hdev);
3363         if (ret)
3364                 goto err_reset_lock;
3365
3366         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3367         if (ret)
3368                 goto err_reset_lock;
3369
3370         rtnl_unlock();
3371
3372         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3373         if (ret)
3374                 goto err_reset;
3375
3376         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3377         if (ret)
3378                 goto err_reset;
3379
3380         hdev->last_reset_time = jiffies;
3381         hdev->reset_fail_cnt = 0;
3382         hdev->rst_stats.reset_done_cnt++;
3383         ae_dev->reset_type = HNAE3_NONE_RESET;
3384         del_timer(&hdev->reset_timer);
3385
3386         return;
3387
3388 err_reset_lock:
3389         rtnl_unlock();
3390 err_reset:
3391         if (hclge_reset_err_handle(hdev, is_timeout))
3392                 hclge_reset_task_schedule(hdev);
3393 }
3394
3395 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3396 {
3397         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3398         struct hclge_dev *hdev = ae_dev->priv;
3399
3400         /* We might end up getting called broadly because of the 2 cases below:
3401          * 1. A recoverable error was conveyed through APEI and the only way
3402          *    to bring back normalcy is to reset.
3403          * 2. A new reset request from the stack due to timeout
3404          *
3405          * For the first case, the error event might not have an ae handle
3406          * available. Check if this is a new reset request and we are not here
3407          * just because the last reset attempt did not succeed and the watchdog
3408          * hit us again. We will know this if the last reset request did not
3409          * occur very recently (watchdog timer = 5*HZ, so check after a
3410          * sufficiently large time, say 4*5*HZ). In case of a new request we
3411          * reset the "reset level" to PF reset. And if it is a repeat of the
3412          * most recent reset request then we want to throttle it; therefore,
3413          * we will not allow it again before 3*HZ has elapsed.
3414          */
3415         if (!handle)
3416                 handle = &hdev->vport[0].nic;
3417
3418         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3419                 return;
3420         else if (hdev->default_reset_request)
3421                 hdev->reset_level =
3422                         hclge_get_reset_level(hdev,
3423                                               &hdev->default_reset_request);
3424         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3425                 hdev->reset_level = HNAE3_FUNC_RESET;
3426
3427         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3428                  hdev->reset_level);
3429
3430         /* request reset & schedule reset task */
3431         set_bit(hdev->reset_level, &hdev->reset_request);
3432         hclge_reset_task_schedule(hdev);
3433
3434         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3435                 hdev->reset_level++;
3436 }
3437
3438 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3439                                         enum hnae3_reset_type rst_type)
3440 {
3441         struct hclge_dev *hdev = ae_dev->priv;
3442
3443         set_bit(rst_type, &hdev->default_reset_request);
3444 }
3445
3446 static void hclge_reset_timer(struct timer_list *t)
3447 {
3448         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3449
3450         dev_info(&hdev->pdev->dev,
3451                  "triggering global reset in reset timer\n");
3452         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3453         hclge_reset_event(hdev->pdev, NULL);
3454 }
3455
3456 static void hclge_reset_subtask(struct hclge_dev *hdev)
3457 {
3458         /* check if there is any ongoing reset in the hardware. This status can
3459          * be checked from reset_pending. If there is, then we need to wait for
3460          * hardware to complete the reset.
3461          *    a. If we are able to figure out in reasonable time that hardware
3462          *       has fully reset, then we can proceed with driver and client
3463          *       reset.
3464          *    b. else, we can come back later to check this status, so
3465          *       reschedule now.
3466          */
3467         hdev->last_reset_time = jiffies;
3468         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3469         if (hdev->reset_type != HNAE3_NONE_RESET)
3470                 hclge_reset(hdev);
3471
3472         /* check if we got any *new* reset requests to be honored */
3473         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3474         if (hdev->reset_type != HNAE3_NONE_RESET)
3475                 hclge_do_reset(hdev);
3476
3477         hdev->reset_type = HNAE3_NONE_RESET;
3478 }
3479
3480 static void hclge_reset_service_task(struct work_struct *work)
3481 {
3482         struct hclge_dev *hdev =
3483                 container_of(work, struct hclge_dev, rst_service_task);
3484
3485         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3486                 return;
3487
3488         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3489
3490         hclge_reset_subtask(hdev);
3491
3492         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3493 }
3494
3495 static void hclge_mailbox_service_task(struct work_struct *work)
3496 {
3497         struct hclge_dev *hdev =
3498                 container_of(work, struct hclge_dev, mbx_service_task);
3499
3500         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3501                 return;
3502
3503         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3504
3505         hclge_mbx_handler(hdev);
3506
3507         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3508 }
3509
3510 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3511 {
3512         int i;
3513
3514         /* start from vport 1 since the PF (vport 0) is always alive */
3515         for (i = 1; i < hdev->num_alloc_vport; i++) {
3516                 struct hclge_vport *vport = &hdev->vport[i];
3517
3518                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3519                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3520
3521                 /* If vf is not alive, set to default value */
3522                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3523                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3524         }
3525 }
3526
3527 static void hclge_service_task(struct work_struct *work)
3528 {
3529         struct hclge_dev *hdev =
3530                 container_of(work, struct hclge_dev, service_task);
3531
3532         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3533                 hclge_update_stats_for_all(hdev);
3534                 hdev->hw_stats.stats_timer = 0;
3535         }
3536
3537         hclge_update_port_info(hdev);
3538         hclge_update_link_status(hdev);
3539         hclge_update_vport_alive(hdev);
3540         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3541                 hclge_rfs_filter_expire(hdev);
3542                 hdev->fd_arfs_expire_timer = 0;
3543         }
3544         hclge_service_complete(hdev);
3545 }
3546
3547 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3548 {
3549         /* VF handle has no client */
3550         if (!handle->client)
3551                 return container_of(handle, struct hclge_vport, nic);
3552         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3553                 return container_of(handle, struct hclge_vport, roce);
3554         else
3555                 return container_of(handle, struct hclge_vport, nic);
3556 }
3557
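     /* Allocate up to vector_num unused MSI-X vectors for the requesting
      * vport, starting from index 1 (vector 0 is reserved for the misc
      * interrupt). The irq number and per-vport doorbell address are filled
      * into vector_info for each vector; the number actually allocated is
      * returned.
      */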
3558 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3559                             struct hnae3_vector_info *vector_info)
3560 {
3561         struct hclge_vport *vport = hclge_get_vport(handle);
3562         struct hnae3_vector_info *vector = vector_info;
3563         struct hclge_dev *hdev = vport->back;
3564         int alloc = 0;
3565         int i, j;
3566
3567         vector_num = min(hdev->num_msi_left, vector_num);
3568
3569         for (j = 0; j < vector_num; j++) {
3570                 for (i = 1; i < hdev->num_msi; i++) {
3571                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3572                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3573                                 vector->io_addr = hdev->hw.io_base +
3574                                         HCLGE_VECTOR_REG_BASE +
3575                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3576                                         vport->vport_id *
3577                                         HCLGE_VECTOR_VF_OFFSET;
3578                                 hdev->vector_status[i] = vport->vport_id;
3579                                 hdev->vector_irq[i] = vector->vector;
3580
3581                                 vector++;
3582                                 alloc++;
3583
3584                                 break;
3585                         }
3586                 }
3587         }
3588         hdev->num_msi_left -= alloc;
3589         hdev->num_msi_used += alloc;
3590
3591         return alloc;
3592 }
3593
3594 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3595 {
3596         int i;
3597
3598         for (i = 0; i < hdev->num_msi; i++)
3599                 if (vector == hdev->vector_irq[i])
3600                         return i;
3601
3602         return -EINVAL;
3603 }
3604
3605 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3606 {
3607         struct hclge_vport *vport = hclge_get_vport(handle);
3608         struct hclge_dev *hdev = vport->back;
3609         int vector_id;
3610
3611         vector_id = hclge_get_vector_index(hdev, vector);
3612         if (vector_id < 0) {
3613                 dev_err(&hdev->pdev->dev,
3614                         "Get vector index fail. vector_id =%d\n", vector_id);
3615                 return vector_id;
3616         }
3617
3618         hclge_free_vector(hdev, vector_id);
3619
3620         return 0;
3621 }
3622
3623 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3624 {
3625         return HCLGE_RSS_KEY_SIZE;
3626 }
3627
3628 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3629 {
3630         return HCLGE_RSS_IND_TBL_SIZE;
3631 }
3632
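     /* Program the RSS hash algorithm and hash key. The key is written in
      * three chunks of at most HCLGE_RSS_HASH_KEY_NUM bytes, one command per
      * chunk, with the chunk index carried in the hash_config field.
      */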
3633 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3634                                   const u8 hfunc, const u8 *key)
3635 {
3636         struct hclge_rss_config_cmd *req;
3637         struct hclge_desc desc;
3638         int key_offset;
3639         int key_size;
3640         int ret;
3641
3642         req = (struct hclge_rss_config_cmd *)desc.data;
3643
3644         for (key_offset = 0; key_offset < 3; key_offset++) {
3645                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3646                                            false);
3647
3648                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3649                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3650
3651                 if (key_offset == 2)
3652                         key_size =
3653                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3654                 else
3655                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3656
3657                 memcpy(req->hash_key,
3658                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3659
3660                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3661                 if (ret) {
3662                         dev_err(&hdev->pdev->dev,
3663                                 "Configure RSS config fail, status = %d\n",
3664                                 ret);
3665                         return ret;
3666                 }
3667         }
3668         return 0;
3669 }
3670
3671 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3672 {
3673         struct hclge_rss_indirection_table_cmd *req;
3674         struct hclge_desc desc;
3675         int i, j;
3676         int ret;
3677
3678         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3679
3680         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3681                 hclge_cmd_setup_basic_desc
3682                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3683
3684                 req->start_table_index =
3685                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3686                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3687
3688                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3689                         req->rss_result[j] =
3690                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3691
3692                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3693                 if (ret) {
3694                         dev_err(&hdev->pdev->dev,
3695                                 "Configure rss indir table fail,status = %d\n",
3696                                 ret);
3697                         return ret;
3698                 }
3699         }
3700         return 0;
3701 }
3702
3703 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3704                                  u16 *tc_size, u16 *tc_offset)
3705 {
3706         struct hclge_rss_tc_mode_cmd *req;
3707         struct hclge_desc desc;
3708         int ret;
3709         int i;
3710
3711         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3712         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3713
3714         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3715                 u16 mode = 0;
3716
3717                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3718                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3719                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3720                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3721                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3722
3723                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3724         }
3725
3726         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3727         if (ret)
3728                 dev_err(&hdev->pdev->dev,
3729                         "Configure rss tc mode fail, status = %d\n", ret);
3730
3731         return ret;
3732 }
3733
3734 static void hclge_get_rss_type(struct hclge_vport *vport)
3735 {
3736         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3737             vport->rss_tuple_sets.ipv4_udp_en ||
3738             vport->rss_tuple_sets.ipv4_sctp_en ||
3739             vport->rss_tuple_sets.ipv6_tcp_en ||
3740             vport->rss_tuple_sets.ipv6_udp_en ||
3741             vport->rss_tuple_sets.ipv6_sctp_en)
3742                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3743         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3744                  vport->rss_tuple_sets.ipv6_fragment_en)
3745                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3746         else
3747                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3748 }
3749
3750 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3751 {
3752         struct hclge_rss_input_tuple_cmd *req;
3753         struct hclge_desc desc;
3754         int ret;
3755
3756         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3757
3758         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3759
3760         /* Get the tuple cfg from pf */
3761         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3762         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3763         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3764         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3765         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3766         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3767         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3768         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3769         hclge_get_rss_type(&hdev->vport[0]);
3770         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3771         if (ret)
3772                 dev_err(&hdev->pdev->dev,
3773                         "Configure rss input fail, status = %d\n", ret);
3774         return ret;
3775 }
3776
3777 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3778                          u8 *key, u8 *hfunc)
3779 {
3780         struct hclge_vport *vport = hclge_get_vport(handle);
3781         int i;
3782
3783         /* Get hash algorithm */
3784         if (hfunc) {
3785                 switch (vport->rss_algo) {
3786                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3787                         *hfunc = ETH_RSS_HASH_TOP;
3788                         break;
3789                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3790                         *hfunc = ETH_RSS_HASH_XOR;
3791                         break;
3792                 default:
3793                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3794                         break;
3795                 }
3796         }
3797
3798         /* Get the RSS Key required by the user */
3799         if (key)
3800                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3801
3802         /* Get indirect table */
3803         if (indir)
3804                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3805                         indir[i] =  vport->rss_indirection_tbl[i];
3806
3807         return 0;
3808 }
3809
3810 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3811                          const  u8 *key, const  u8 hfunc)
3812 {
3813         struct hclge_vport *vport = hclge_get_vport(handle);
3814         struct hclge_dev *hdev = vport->back;
3815         u8 hash_algo;
3816         int ret, i;
3817
3818         /* Set the RSS Hash Key if specified by the user */
3819         if (key) {
3820                 switch (hfunc) {
3821                 case ETH_RSS_HASH_TOP:
3822                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3823                         break;
3824                 case ETH_RSS_HASH_XOR:
3825                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3826                         break;
3827                 case ETH_RSS_HASH_NO_CHANGE:
3828                         hash_algo = vport->rss_algo;
3829                         break;
3830                 default:
3831                         return -EINVAL;
3832                 }
3833
3834                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3835                 if (ret)
3836                         return ret;
3837
3838                 /* Update the shadow RSS key with the user specified key */
3839                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3840                 vport->rss_algo = hash_algo;
3841         }
3842
3843         /* Update the shadow RSS table with user specified qids */
3844         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3845                 vport->rss_indirection_tbl[i] = indir[i];
3846
3847         /* Update the hardware */
3848         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3849 }
3850
3851 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3852 {
3853         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3854
3855         if (nfc->data & RXH_L4_B_2_3)
3856                 hash_sets |= HCLGE_D_PORT_BIT;
3857         else
3858                 hash_sets &= ~HCLGE_D_PORT_BIT;
3859
3860         if (nfc->data & RXH_IP_SRC)
3861                 hash_sets |= HCLGE_S_IP_BIT;
3862         else
3863                 hash_sets &= ~HCLGE_S_IP_BIT;
3864
3865         if (nfc->data & RXH_IP_DST)
3866                 hash_sets |= HCLGE_D_IP_BIT;
3867         else
3868                 hash_sets &= ~HCLGE_D_IP_BIT;
3869
3870         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3871                 hash_sets |= HCLGE_V_TAG_BIT;
3872
3873         return hash_sets;
3874 }
3875
3876 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3877                                struct ethtool_rxnfc *nfc)
3878 {
3879         struct hclge_vport *vport = hclge_get_vport(handle);
3880         struct hclge_dev *hdev = vport->back;
3881         struct hclge_rss_input_tuple_cmd *req;
3882         struct hclge_desc desc;
3883         u8 tuple_sets;
3884         int ret;
3885
3886         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3887                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3888                 return -EINVAL;
3889
3890         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3891         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3892
3893         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3894         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3895         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3896         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3897         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3898         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3899         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3900         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3901
3902         tuple_sets = hclge_get_rss_hash_bits(nfc);
3903         switch (nfc->flow_type) {
3904         case TCP_V4_FLOW:
3905                 req->ipv4_tcp_en = tuple_sets;
3906                 break;
3907         case TCP_V6_FLOW:
3908                 req->ipv6_tcp_en = tuple_sets;
3909                 break;
3910         case UDP_V4_FLOW:
3911                 req->ipv4_udp_en = tuple_sets;
3912                 break;
3913         case UDP_V6_FLOW:
3914                 req->ipv6_udp_en = tuple_sets;
3915                 break;
3916         case SCTP_V4_FLOW:
3917                 req->ipv4_sctp_en = tuple_sets;
3918                 break;
3919         case SCTP_V6_FLOW:
3920                 if ((nfc->data & RXH_L4_B_0_1) ||
3921                     (nfc->data & RXH_L4_B_2_3))
3922                         return -EINVAL;
3923
3924                 req->ipv6_sctp_en = tuple_sets;
3925                 break;
3926         case IPV4_FLOW:
3927                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3928                 break;
3929         case IPV6_FLOW:
3930                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3931                 break;
3932         default:
3933                 return -EINVAL;
3934         }
3935
3936         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3937         if (ret) {
3938                 dev_err(&hdev->pdev->dev,
3939                         "Set rss tuple fail, status = %d\n", ret);
3940                 return ret;
3941         }
3942
3943         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3944         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3945         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3946         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3947         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3948         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3949         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3950         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3951         hclge_get_rss_type(vport);
3952         return 0;
3953 }
3954
3955 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3956                                struct ethtool_rxnfc *nfc)
3957 {
3958         struct hclge_vport *vport = hclge_get_vport(handle);
3959         u8 tuple_sets;
3960
3961         nfc->data = 0;
3962
3963         switch (nfc->flow_type) {
3964         case TCP_V4_FLOW:
3965                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3966                 break;
3967         case UDP_V4_FLOW:
3968                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3969                 break;
3970         case TCP_V6_FLOW:
3971                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3972                 break;
3973         case UDP_V6_FLOW:
3974                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3975                 break;
3976         case SCTP_V4_FLOW:
3977                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3978                 break;
3979         case SCTP_V6_FLOW:
3980                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3981                 break;
3982         case IPV4_FLOW:
3983         case IPV6_FLOW:
3984                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3985                 break;
3986         default:
3987                 return -EINVAL;
3988         }
3989
3990         if (!tuple_sets)
3991                 return 0;
3992
3993         if (tuple_sets & HCLGE_D_PORT_BIT)
3994                 nfc->data |= RXH_L4_B_2_3;
3995         if (tuple_sets & HCLGE_S_PORT_BIT)
3996                 nfc->data |= RXH_L4_B_0_1;
3997         if (tuple_sets & HCLGE_D_IP_BIT)
3998                 nfc->data |= RXH_IP_DST;
3999         if (tuple_sets & HCLGE_S_IP_BIT)
4000                 nfc->data |= RXH_IP_SRC;
4001
4002         return 0;
4003 }
4004
4005 static int hclge_get_tc_size(struct hnae3_handle *handle)
4006 {
4007         struct hclge_vport *vport = hclge_get_vport(handle);
4008         struct hclge_dev *hdev = vport->back;
4009
4010         return hdev->rss_size_max;
4011 }
4012
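     /* Program the RSS configuration saved in vport[0] into hardware: the
      * indirection table, the hash key and algorithm, the input tuple
      * selection, and the per-TC mode, where tc_size is the log2 of rss_size
      * rounded up to a power of two and tc_offset is rss_size * TC index.
      */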
4013 int hclge_rss_init_hw(struct hclge_dev *hdev)
4014 {
4015         struct hclge_vport *vport = hdev->vport;
4016         u8 *rss_indir = vport[0].rss_indirection_tbl;
4017         u16 rss_size = vport[0].alloc_rss_size;
4018         u8 *key = vport[0].rss_hash_key;
4019         u8 hfunc = vport[0].rss_algo;
4020         u16 tc_offset[HCLGE_MAX_TC_NUM];
4021         u16 tc_valid[HCLGE_MAX_TC_NUM];
4022         u16 tc_size[HCLGE_MAX_TC_NUM];
4023         u16 roundup_size;
4024         int i, ret;
4025
4026         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4027         if (ret)
4028                 return ret;
4029
4030         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4031         if (ret)
4032                 return ret;
4033
4034         ret = hclge_set_rss_input_tuple(hdev);
4035         if (ret)
4036                 return ret;
4037
4038         /* Each TC has the same queue size, and the tc_size set to hardware is
4039          * the log2 of the roundup power of two of rss_size; the actual queue
4040          * size is limited by the indirection table.
4041          */
4042         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4043                 dev_err(&hdev->pdev->dev,
4044                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4045                         rss_size);
4046                 return -EINVAL;
4047         }
4048
4049         roundup_size = roundup_pow_of_two(rss_size);
4050         roundup_size = ilog2(roundup_size);
4051
4052         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4053                 tc_valid[i] = 0;
4054
4055                 if (!(hdev->hw_tc_map & BIT(i)))
4056                         continue;
4057
4058                 tc_valid[i] = 1;
4059                 tc_size[i] = roundup_size;
4060                 tc_offset[i] = rss_size * i;
4061         }
4062
4063         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4064 }
4065
4066 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4067 {
4068         struct hclge_vport *vport = hdev->vport;
4069         int i, j;
4070
4071         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4072                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4073                         vport[j].rss_indirection_tbl[i] =
4074                                 i % vport[j].alloc_rss_size;
4075         }
4076 }
4077
4078 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4079 {
4080         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4081         struct hclge_vport *vport = hdev->vport;
4082
4083         if (hdev->pdev->revision >= 0x21)
4084                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4085
4086         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4087                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4088                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4089                 vport[i].rss_tuple_sets.ipv4_udp_en =
4090                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4091                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4092                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4093                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4094                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4095                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4096                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4097                 vport[i].rss_tuple_sets.ipv6_udp_en =
4098                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4099                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4100                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4101                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4102                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4103
4104                 vport[i].rss_algo = rss_algo;
4105
4106                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4107                        HCLGE_RSS_KEY_SIZE);
4108         }
4109
4110         hclge_rss_indir_init_cfg(hdev);
4111 }
4112
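     /* Map (en == true) or unmap the rings in ring_chain to/from interrupt
      * vector vector_id. Ring entries are packed into the command descriptor
      * up to HCLGE_VECTOR_ELEMENTS_PER_CMD at a time; a full descriptor is
      * sent immediately and any remaining entries are flushed at the end.
      */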
4113 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4114                                 int vector_id, bool en,
4115                                 struct hnae3_ring_chain_node *ring_chain)
4116 {
4117         struct hclge_dev *hdev = vport->back;
4118         struct hnae3_ring_chain_node *node;
4119         struct hclge_desc desc;
4120         struct hclge_ctrl_vector_chain_cmd *req
4121                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4122         enum hclge_cmd_status status;
4123         enum hclge_opcode_type op;
4124         u16 tqp_type_and_id;
4125         int i;
4126
4127         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4128         hclge_cmd_setup_basic_desc(&desc, op, false);
4129         req->int_vector_id = vector_id;
4130
4131         i = 0;
4132         for (node = ring_chain; node; node = node->next) {
4133                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4134                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4135                                 HCLGE_INT_TYPE_S,
4136                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4137                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4138                                 HCLGE_TQP_ID_S, node->tqp_index);
4139                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4140                                 HCLGE_INT_GL_IDX_S,
4141                                 hnae3_get_field(node->int_gl_idx,
4142                                                 HNAE3_RING_GL_IDX_M,
4143                                                 HNAE3_RING_GL_IDX_S));
4144                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4145                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4146                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4147                         req->vfid = vport->vport_id;
4148
4149                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4150                         if (status) {
4151                                 dev_err(&hdev->pdev->dev,
4152                                         "Map TQP fail, status is %d.\n",
4153                                         status);
4154                                 return -EIO;
4155                         }
4156                         i = 0;
4157
4158                         hclge_cmd_setup_basic_desc(&desc,
4159                                                    op,
4160                                                    false);
4161                         req->int_vector_id = vector_id;
4162                 }
4163         }
4164
4165         if (i > 0) {
4166                 req->int_cause_num = i;
4167                 req->vfid = vport->vport_id;
4168                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4169                 if (status) {
4170                         dev_err(&hdev->pdev->dev,
4171                                 "Map TQP fail, status is %d.\n", status);
4172                         return -EIO;
4173                 }
4174         }
4175
4176         return 0;
4177 }
4178
4179 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4180                                     int vector,
4181                                     struct hnae3_ring_chain_node *ring_chain)
4182 {
4183         struct hclge_vport *vport = hclge_get_vport(handle);
4184         struct hclge_dev *hdev = vport->back;
4185         int vector_id;
4186
4187         vector_id = hclge_get_vector_index(hdev, vector);
4188         if (vector_id < 0) {
4189                 dev_err(&hdev->pdev->dev,
4190                         "Get vector index fail. vector_id = %d\n", vector_id);
4191                 return vector_id;
4192         }
4193
4194         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4195 }
4196
4197 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4198                                        int vector,
4199                                        struct hnae3_ring_chain_node *ring_chain)
4200 {
4201         struct hclge_vport *vport = hclge_get_vport(handle);
4202         struct hclge_dev *hdev = vport->back;
4203         int vector_id, ret;
4204
4205         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4206                 return 0;
4207
4208         vector_id = hclge_get_vector_index(hdev, vector);
4209         if (vector_id < 0) {
4210                 dev_err(&handle->pdev->dev,
4211                         "Get vector index fail. vector_id = %d\n", vector_id);
4212                 return vector_id;
4213         }
4214
4215         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4216         if (ret)
4217                 dev_err(&handle->pdev->dev,
4218                         "Unmap ring from vector fail. vector_id=%d, ret=%d\n",
4219                         vector_id,
4220                         ret);
4221
4222         return ret;
4223 }
4224
4225 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4226                                struct hclge_promisc_param *param)
4227 {
4228         struct hclge_promisc_cfg_cmd *req;
4229         struct hclge_desc desc;
4230         int ret;
4231
4232         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4233
4234         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4235         req->vf_id = param->vf_id;
4236
4237         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4238          * pdev revision 0x20; newer revisions support them. Setting these
4239          * two fields does not cause the firmware to return an error when
4240          * the command is sent on revision 0x20.
4241          */
4242         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4243                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4244
4245         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4246         if (ret)
4247                 dev_err(&hdev->pdev->dev,
4248                         "Set promisc mode fail, status is %d.\n", ret);
4249
4250         return ret;
4251 }
4252
4253 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4254                               bool en_mc, bool en_bc, int vport_id)
4255 {
4256         if (!param)
4257                 return;
4258
4259         memset(param, 0, sizeof(struct hclge_promisc_param));
4260         if (en_uc)
4261                 param->enable = HCLGE_PROMISC_EN_UC;
4262         if (en_mc)
4263                 param->enable |= HCLGE_PROMISC_EN_MC;
4264         if (en_bc)
4265                 param->enable |= HCLGE_PROMISC_EN_BC;
4266         param->vf_id = vport_id;
4267 }
4268
4269 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4270                                   bool en_mc_pmc)
4271 {
4272         struct hclge_vport *vport = hclge_get_vport(handle);
4273         struct hclge_dev *hdev = vport->back;
4274         struct hclge_promisc_param param;
4275         bool en_bc_pmc = true;
4276
4277         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4278          * is always bypassed. So broadcast promisc should be disabled until
4279          * the user enables promisc mode.
4280          */
4281         if (handle->pdev->revision == 0x20)
4282                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4283
4284         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4285                                  vport->vport_id);
4286         return hclge_cmd_set_promisc_mode(hdev, &param);
4287 }
4288
4289 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4290 {
4291         struct hclge_get_fd_mode_cmd *req;
4292         struct hclge_desc desc;
4293         int ret;
4294
4295         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4296
4297         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4298
4299         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4300         if (ret) {
4301                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4302                 return ret;
4303         }
4304
4305         *fd_mode = req->mode;
4306
4307         return ret;
4308 }
4309
4310 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4311                                    u32 *stage1_entry_num,
4312                                    u32 *stage2_entry_num,
4313                                    u16 *stage1_counter_num,
4314                                    u16 *stage2_counter_num)
4315 {
4316         struct hclge_get_fd_allocation_cmd *req;
4317         struct hclge_desc desc;
4318         int ret;
4319
4320         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4321
4322         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4323
4324         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4325         if (ret) {
4326                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4327                         ret);
4328                 return ret;
4329         }
4330
4331         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4332         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4333         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4334         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4335
4336         return ret;
4337 }
4338
4339 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4340 {
4341         struct hclge_set_fd_key_config_cmd *req;
4342         struct hclge_fd_key_cfg *stage;
4343         struct hclge_desc desc;
4344         int ret;
4345
4346         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4347
4348         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4349         stage = &hdev->fd_cfg.key_cfg[stage_num];
4350         req->stage = stage_num;
4351         req->key_select = stage->key_sel;
4352         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4353         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4354         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4355         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4356         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4357         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4358
4359         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4360         if (ret)
4361                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4362
4363         return ret;
4364 }
4365
4366 static int hclge_init_fd_config(struct hclge_dev *hdev)
4367 {
4368 #define LOW_2_WORDS             0x03
4369         struct hclge_fd_key_cfg *key_cfg;
4370         int ret;
4371
4372         if (!hnae3_dev_fd_supported(hdev))
4373                 return 0;
4374
4375         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4376         if (ret)
4377                 return ret;
4378
4379         switch (hdev->fd_cfg.fd_mode) {
4380         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4381                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4382                 break;
4383         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4384                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4385                 break;
4386         default:
4387                 dev_err(&hdev->pdev->dev,
4388                         "Unsupported flow director mode %d\n",
4389                         hdev->fd_cfg.fd_mode);
4390                 return -EOPNOTSUPP;
4391         }
4392
4393         hdev->fd_cfg.proto_support =
4394                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4395                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4396         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4397         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4398         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4399         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4400         key_cfg->outer_sipv6_word_en = 0;
4401         key_cfg->outer_dipv6_word_en = 0;
4402
4403         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4404                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4405                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4406                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4407
4408         /* With the max 400-bit key, tuples for ether type are also supported */
4409         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4410                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4411                 key_cfg->tuple_active |=
4412                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4413         }
4414
4415         /* roce_type is used to filter roce frames,
4416          * dst_vport is used to bind the rule to a specific vport
4417          */
4418         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4419
4420         ret = hclge_get_fd_allocation(hdev,
4421                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4422                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4423                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4424                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4425         if (ret)
4426                 return ret;
4427
4428         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4429 }
4430
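/* Program one half (x or y, selected by sel_x) of the TCAM entry at index
 * loc for the given stage. The key is spread over three command descriptors
 * chained with HCLGE_CMD_FLAG_NEXT; calling with a NULL key and
 * is_add == false invalidates the entry.
 */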
4431 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4432                                 int loc, u8 *key, bool is_add)
4433 {
4434         struct hclge_fd_tcam_config_1_cmd *req1;
4435         struct hclge_fd_tcam_config_2_cmd *req2;
4436         struct hclge_fd_tcam_config_3_cmd *req3;
4437         struct hclge_desc desc[3];
4438         int ret;
4439
4440         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4441         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4442         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4443         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4444         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4445
4446         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4447         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4448         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4449
4450         req1->stage = stage;
4451         req1->xy_sel = sel_x ? 1 : 0;
4452         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4453         req1->index = cpu_to_le32(loc);
4454         req1->entry_vld = sel_x ? is_add : 0;
4455
4456         if (key) {
4457                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4458                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4459                        sizeof(req2->tcam_data));
4460                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4461                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4462         }
4463
4464         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4465         if (ret)
4466                 dev_err(&hdev->pdev->dev,
4467                         "config tcam key fail, ret=%d\n",
4468                         ret);
4469
4470         return ret;
4471 }
4472
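/* Write the action data (AD) entry for index loc: drop the packet or
 * forward it to a direct queue, optionally use a counter or a next-stage
 * key, and optionally write the rule id back (HCLGE_FD_AD_WR_RULE_ID_B).
 */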
4473 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4474                               struct hclge_fd_ad_data *action)
4475 {
4476         struct hclge_fd_ad_config_cmd *req;
4477         struct hclge_desc desc;
4478         u64 ad_data = 0;
4479         int ret;
4480
4481         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4482
4483         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4484         req->index = cpu_to_le32(loc);
4485         req->stage = stage;
4486
4487         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4488                       action->write_rule_id_to_bd);
4489         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4490                         action->rule_id);
4491         ad_data <<= 32;
4492         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4493         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4494                       action->forward_to_direct_queue);
4495         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4496                         action->queue_id);
4497         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4498         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4499                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4500         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4501         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4502                         action->counter_id);
4503
4504         req->ad_data = cpu_to_le64(ad_data);
4505         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4506         if (ret)
4507                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4508
4509         return ret;
4510 }
4511
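/* Convert one tuple of the rule into its TCAM x/y representation at
 * key_x/key_y using the calc_x()/calc_y() helpers. Returns true when the
 * tuple occupies space in the key, so the caller advances its key cursor;
 * tuples listed in rule->unused_tuple still take up space but are left
 * untouched. Returns false for tuples that are not active in the key.
 */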
4512 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4513                                    struct hclge_fd_rule *rule)
4514 {
4515         u16 tmp_x_s, tmp_y_s;
4516         u32 tmp_x_l, tmp_y_l;
4517         int i;
4518
4519         if (rule->unused_tuple & tuple_bit)
4520                 return true;
4521
4522         switch (tuple_bit) {
4523         case 0:
4524                 return false;
4525         case BIT(INNER_DST_MAC):
4526                 for (i = 0; i < 6; i++) {
4527                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4528                                rule->tuples_mask.dst_mac[i]);
4529                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4530                                rule->tuples_mask.dst_mac[i]);
4531                 }
4532
4533                 return true;
4534         case BIT(INNER_SRC_MAC):
4535                 for (i = 0; i < 6; i++) {
4536                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4537                                rule->tuples_mask.src_mac[i]);
4538                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4539                                rule->tuples_mask.src_mac[i]);
4540                 }
4541
4542                 return true;
4543         case BIT(INNER_VLAN_TAG_FST):
4544                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4545                        rule->tuples_mask.vlan_tag1);
4546                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4547                        rule->tuples_mask.vlan_tag1);
4548                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4549                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4550
4551                 return true;
4552         case BIT(INNER_ETH_TYPE):
4553                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4554                        rule->tuples_mask.ether_proto);
4555                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4556                        rule->tuples_mask.ether_proto);
4557                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4558                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4559
4560                 return true;
4561         case BIT(INNER_IP_TOS):
4562                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4563                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4564
4565                 return true;
4566         case BIT(INNER_IP_PROTO):
4567                 calc_x(*key_x, rule->tuples.ip_proto,
4568                        rule->tuples_mask.ip_proto);
4569                 calc_y(*key_y, rule->tuples.ip_proto,
4570                        rule->tuples_mask.ip_proto);
4571
4572                 return true;
4573         case BIT(INNER_SRC_IP):
4574                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4575                        rule->tuples_mask.src_ip[3]);
4576                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4577                        rule->tuples_mask.src_ip[3]);
4578                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4579                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4580
4581                 return true;
4582         case BIT(INNER_DST_IP):
4583                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4584                        rule->tuples_mask.dst_ip[3]);
4585                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4586                        rule->tuples_mask.dst_ip[3]);
4587                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4588                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4589
4590                 return true;
4591         case BIT(INNER_SRC_PORT):
4592                 calc_x(tmp_x_s, rule->tuples.src_port,
4593                        rule->tuples_mask.src_port);
4594                 calc_y(tmp_y_s, rule->tuples.src_port,
4595                        rule->tuples_mask.src_port);
4596                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4597                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4598
4599                 return true;
4600         case BIT(INNER_DST_PORT):
4601                 calc_x(tmp_x_s, rule->tuples.dst_port,
4602                        rule->tuples_mask.dst_port);
4603                 calc_y(tmp_y_s, rule->tuples.dst_port,
4604                        rule->tuples_mask.dst_port);
4605                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4606                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4607
4608                 return true;
4609         default:
4610                 return false;
4611         }
4612 }
4613
4614 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4615                                  u8 vf_id, u8 network_port_id)
4616 {
4617         u32 port_number = 0;
4618
4619         if (port_type == HOST_PORT) {
4620                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4621                                 pf_id);
4622                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4623                                 vf_id);
4624                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4625         } else {
4626                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4627                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4628                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4629         }
4630
4631         return port_number;
4632 }
4633
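/* Pack the active meta data fields (packet type and the rule's destination
 * vport) into a single word, convert it to its x/y form and shift it so
 * that the used bits sit at the most significant end of the meta data
 * region of the key.
 */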
4634 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4635                                        __le32 *key_x, __le32 *key_y,
4636                                        struct hclge_fd_rule *rule)
4637 {
4638         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4639         u8 cur_pos = 0, tuple_size, shift_bits;
4640         int i;
4641
4642         for (i = 0; i < MAX_META_DATA; i++) {
4643                 tuple_size = meta_data_key_info[i].key_length;
4644                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4645
4646                 switch (tuple_bit) {
4647                 case BIT(ROCE_TYPE):
4648                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4649                         cur_pos += tuple_size;
4650                         break;
4651                 case BIT(DST_VPORT):
4652                         port_number = hclge_get_port_number(HOST_PORT, 0,
4653                                                             rule->vf_id, 0);
4654                         hnae3_set_field(meta_data,
4655                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4656                                         cur_pos, port_number);
4657                         cur_pos += tuple_size;
4658                         break;
4659                 default:
4660                         break;
4661                 }
4662         }
4663
4664         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4665         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4666         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4667
4668         *key_x = cpu_to_le32(tmp_x << shift_bits);
4669         *key_y = cpu_to_le32(tmp_y << shift_bits);
4670 }
4671
4672 /* A complete key consists of a meta data key and a tuple key.
4673  * The meta data key is stored in the MSB region and the tuple key in the
4674  * LSB region; unused bits are filled with 0.
4675  */
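/* As an illustration (assuming the 400-bit maximum key mentioned above,
 * i.e. 50 bytes): the tuple fields are packed upwards from byte 0 of the
 * key, while the last MAX_META_DATA_LENGTH / 8 bytes hold the meta data,
 * starting at the meta_data_region offset computed in hclge_config_key()
 * below.
 */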
4676 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4677                             struct hclge_fd_rule *rule)
4678 {
4679         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4680         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4681         u8 *cur_key_x, *cur_key_y;
4682         int i, ret, tuple_size;
4683         u8 meta_data_region;
4684
4685         memset(key_x, 0, sizeof(key_x));
4686         memset(key_y, 0, sizeof(key_y));
4687         cur_key_x = key_x;
4688         cur_key_y = key_y;
4689
4690         for (i = 0; i < MAX_TUPLE; i++) {
4691                 bool tuple_valid;
4692                 u32 check_tuple;
4693
4694                 tuple_size = tuple_key_info[i].key_length / 8;
4695                 check_tuple = key_cfg->tuple_active & BIT(i);
4696
4697                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4698                                                      cur_key_y, rule);
4699                 if (tuple_valid) {
4700                         cur_key_x += tuple_size;
4701                         cur_key_y += tuple_size;
4702                 }
4703         }
4704
4705         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4706                         MAX_META_DATA_LENGTH / 8;
4707
4708         hclge_fd_convert_meta_data(key_cfg,
4709                                    (__le32 *)(key_x + meta_data_region),
4710                                    (__le32 *)(key_y + meta_data_region),
4711                                    rule);
4712
4713         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4714                                    true);
4715         if (ret) {
4716                 dev_err(&hdev->pdev->dev,
4717                         "fd key_y config fail, loc=%d, ret=%d\n",
4718                         rule->location, ret);
4719                 return ret;
4720         }
4721
4722         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4723                                    true);
4724         if (ret)
4725                 dev_err(&hdev->pdev->dev,
4726                         "fd key_x config fail, loc=%d, ret=%d\n",
4727                         rule->location, ret);
4728         return ret;
4729 }
4730
4731 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4732                                struct hclge_fd_rule *rule)
4733 {
4734         struct hclge_fd_ad_data ad_data;
4735
4736         ad_data.ad_id = rule->location;
4737
4738         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4739                 ad_data.drop_packet = true;
4740                 ad_data.forward_to_direct_queue = false;
4741                 ad_data.queue_id = 0;
4742         } else {
4743                 ad_data.drop_packet = false;
4744                 ad_data.forward_to_direct_queue = true;
4745                 ad_data.queue_id = rule->queue_id;
4746         }
4747
4748         ad_data.use_counter = false;
4749         ad_data.counter_id = 0;
4750
4751         ad_data.use_next_stage = false;
4752         ad_data.next_input_key = 0;
4753
4754         ad_data.write_rule_id_to_bd = true;
4755         ad_data.rule_id = rule->location;
4756
4757         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4758 }
4759
4760 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4761                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4762 {
4763         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4764         struct ethtool_usrip4_spec *usr_ip4_spec;
4765         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4766         struct ethtool_usrip6_spec *usr_ip6_spec;
4767         struct ethhdr *ether_spec;
4768
4769         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4770                 return -EINVAL;
4771
4772         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4773                 return -EOPNOTSUPP;
4774
4775         if ((fs->flow_type & FLOW_EXT) &&
4776             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4777                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4778                 return -EOPNOTSUPP;
4779         }
4780
4781         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4782         case SCTP_V4_FLOW:
4783         case TCP_V4_FLOW:
4784         case UDP_V4_FLOW:
4785                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4786                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4787
4788                 if (!tcp_ip4_spec->ip4src)
4789                         *unused |= BIT(INNER_SRC_IP);
4790
4791                 if (!tcp_ip4_spec->ip4dst)
4792                         *unused |= BIT(INNER_DST_IP);
4793
4794                 if (!tcp_ip4_spec->psrc)
4795                         *unused |= BIT(INNER_SRC_PORT);
4796
4797                 if (!tcp_ip4_spec->pdst)
4798                         *unused |= BIT(INNER_DST_PORT);
4799
4800                 if (!tcp_ip4_spec->tos)
4801                         *unused |= BIT(INNER_IP_TOS);
4802
4803                 break;
4804         case IP_USER_FLOW:
4805                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4806                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4807                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4808
4809                 if (!usr_ip4_spec->ip4src)
4810                         *unused |= BIT(INNER_SRC_IP);
4811
4812                 if (!usr_ip4_spec->ip4dst)
4813                         *unused |= BIT(INNER_DST_IP);
4814
4815                 if (!usr_ip4_spec->tos)
4816                         *unused |= BIT(INNER_IP_TOS);
4817
4818                 if (!usr_ip4_spec->proto)
4819                         *unused |= BIT(INNER_IP_PROTO);
4820
4821                 if (usr_ip4_spec->l4_4_bytes)
4822                         return -EOPNOTSUPP;
4823
4824                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4825                         return -EOPNOTSUPP;
4826
4827                 break;
4828         case SCTP_V6_FLOW:
4829         case TCP_V6_FLOW:
4830         case UDP_V6_FLOW:
4831                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4832                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4833                         BIT(INNER_IP_TOS);
4834
4835                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4836                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4837                         *unused |= BIT(INNER_SRC_IP);
4838
4839                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4840                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4841                         *unused |= BIT(INNER_DST_IP);
4842
4843                 if (!tcp_ip6_spec->psrc)
4844                         *unused |= BIT(INNER_SRC_PORT);
4845
4846                 if (!tcp_ip6_spec->pdst)
4847                         *unused |= BIT(INNER_DST_PORT);
4848
4849                 if (tcp_ip6_spec->tclass)
4850                         return -EOPNOTSUPP;
4851
4852                 break;
4853         case IPV6_USER_FLOW:
4854                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4855                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4856                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4857                         BIT(INNER_DST_PORT);
4858
4859                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4860                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4861                         *unused |= BIT(INNER_SRC_IP);
4862
4863                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4864                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4865                         *unused |= BIT(INNER_DST_IP);
4866
4867                 if (!usr_ip6_spec->l4_proto)
4868                         *unused |= BIT(INNER_IP_PROTO);
4869
4870                 if (usr_ip6_spec->tclass)
4871                         return -EOPNOTSUPP;
4872
4873                 if (usr_ip6_spec->l4_4_bytes)
4874                         return -EOPNOTSUPP;
4875
4876                 break;
4877         case ETHER_FLOW:
4878                 ether_spec = &fs->h_u.ether_spec;
4879                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4880                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4881                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4882
4883                 if (is_zero_ether_addr(ether_spec->h_source))
4884                         *unused |= BIT(INNER_SRC_MAC);
4885
4886                 if (is_zero_ether_addr(ether_spec->h_dest))
4887                         *unused |= BIT(INNER_DST_MAC);
4888
4889                 if (!ether_spec->h_proto)
4890                         *unused |= BIT(INNER_ETH_TYPE);
4891
4892                 break;
4893         default:
4894                 return -EOPNOTSUPP;
4895         }
4896
4897         if ((fs->flow_type & FLOW_EXT)) {
4898                 if (fs->h_ext.vlan_etype)
4899                         return -EOPNOTSUPP;
4900                 if (!fs->h_ext.vlan_tci)
4901                         *unused |= BIT(INNER_VLAN_TAG_FST);
4902
4903                 if (fs->m_ext.vlan_tci) {
4904                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4905                                 return -EINVAL;
4906                 }
4907         } else {
4908                 *unused |= BIT(INNER_VLAN_TAG_FST);
4909         }
4910
4911         if (fs->flow_type & FLOW_MAC_EXT) {
4912                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4913                         return -EOPNOTSUPP;
4914
4915                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4916                         *unused |= BIT(INNER_DST_MAC);
4917                 else
4918                         *unused &= ~(BIT(INNER_DST_MAC));
4919         }
4920
4921         return 0;
4922 }
4923
4924 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4925 {
4926         struct hclge_fd_rule *rule = NULL;
4927         struct hlist_node *node2;
4928
4929         spin_lock_bh(&hdev->fd_rule_lock);
4930         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4931                 if (rule->location >= location)
4932                         break;
4933         }
4934
4935         spin_unlock_bh(&hdev->fd_rule_lock);
4936
4937         return rule && rule->location == location;
4938 }
4939
4940 /* must be called with fd_rule_lock held */
4941 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4942                                      struct hclge_fd_rule *new_rule,
4943                                      u16 location,
4944                                      bool is_add)
4945 {
4946         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4947         struct hlist_node *node2;
4948
4949         if (is_add && !new_rule)
4950                 return -EINVAL;
4951
4952         hlist_for_each_entry_safe(rule, node2,
4953                                   &hdev->fd_rule_list, rule_node) {
4954                 if (rule->location >= location)
4955                         break;
4956                 parent = rule;
4957         }
4958
4959         if (rule && rule->location == location) {
4960                 hlist_del(&rule->rule_node);
4961                 kfree(rule);
4962                 hdev->hclge_fd_rule_num--;
4963
4964                 if (!is_add) {
4965                         if (!hdev->hclge_fd_rule_num)
4966                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4967                         clear_bit(location, hdev->fd_bmap);
4968
4969                         return 0;
4970                 }
4971         } else if (!is_add) {
4972                 dev_err(&hdev->pdev->dev,
4973                         "delete fail, rule %d does not exist\n",
4974                         location);
4975                 return -EINVAL;
4976         }
4977
4978         INIT_HLIST_NODE(&new_rule->rule_node);
4979
4980         if (parent)
4981                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4982         else
4983                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4984
4985         set_bit(location, hdev->fd_bmap);
4986         hdev->hclge_fd_rule_num++;
4987         hdev->fd_active_type = new_rule->rule_type;
4988
4989         return 0;
4990 }
4991
4992 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4993                               struct ethtool_rx_flow_spec *fs,
4994                               struct hclge_fd_rule *rule)
4995 {
4996         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4997
4998         switch (flow_type) {
4999         case SCTP_V4_FLOW:
5000         case TCP_V4_FLOW:
5001         case UDP_V4_FLOW:
5002                 rule->tuples.src_ip[3] =
5003                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5004                 rule->tuples_mask.src_ip[3] =
5005                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5006
5007                 rule->tuples.dst_ip[3] =
5008                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5009                 rule->tuples_mask.dst_ip[3] =
5010                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5011
5012                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5013                 rule->tuples_mask.src_port =
5014                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5015
5016                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5017                 rule->tuples_mask.dst_port =
5018                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5019
5020                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5021                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5022
5023                 rule->tuples.ether_proto = ETH_P_IP;
5024                 rule->tuples_mask.ether_proto = 0xFFFF;
5025
5026                 break;
5027         case IP_USER_FLOW:
5028                 rule->tuples.src_ip[3] =
5029                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5030                 rule->tuples_mask.src_ip[3] =
5031                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5032
5033                 rule->tuples.dst_ip[3] =
5034                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5035                 rule->tuples_mask.dst_ip[3] =
5036                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5037
5038                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5039                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5040
5041                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5042                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5043
5044                 rule->tuples.ether_proto = ETH_P_IP;
5045                 rule->tuples_mask.ether_proto = 0xFFFF;
5046
5047                 break;
5048         case SCTP_V6_FLOW:
5049         case TCP_V6_FLOW:
5050         case UDP_V6_FLOW:
5051                 be32_to_cpu_array(rule->tuples.src_ip,
5052                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5053                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5054                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5055
5056                 be32_to_cpu_array(rule->tuples.dst_ip,
5057                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5058                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5059                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5060
5061                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5062                 rule->tuples_mask.src_port =
5063                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5064
5065                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5066                 rule->tuples_mask.dst_port =
5067                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5068
5069                 rule->tuples.ether_proto = ETH_P_IPV6;
5070                 rule->tuples_mask.ether_proto = 0xFFFF;
5071
5072                 break;
5073         case IPV6_USER_FLOW:
5074                 be32_to_cpu_array(rule->tuples.src_ip,
5075                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5076                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5077                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5078
5079                 be32_to_cpu_array(rule->tuples.dst_ip,
5080                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5081                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5082                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5083
5084                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5085                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5086
5087                 rule->tuples.ether_proto = ETH_P_IPV6;
5088                 rule->tuples_mask.ether_proto = 0xFFFF;
5089
5090                 break;
5091         case ETHER_FLOW:
5092                 ether_addr_copy(rule->tuples.src_mac,
5093                                 fs->h_u.ether_spec.h_source);
5094                 ether_addr_copy(rule->tuples_mask.src_mac,
5095                                 fs->m_u.ether_spec.h_source);
5096
5097                 ether_addr_copy(rule->tuples.dst_mac,
5098                                 fs->h_u.ether_spec.h_dest);
5099                 ether_addr_copy(rule->tuples_mask.dst_mac,
5100                                 fs->m_u.ether_spec.h_dest);
5101
5102                 rule->tuples.ether_proto =
5103                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5104                 rule->tuples_mask.ether_proto =
5105                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5106
5107                 break;
5108         default:
5109                 return -EOPNOTSUPP;
5110         }
5111
5112         switch (flow_type) {
5113         case SCTP_V4_FLOW:
5114         case SCTP_V6_FLOW:
5115                 rule->tuples.ip_proto = IPPROTO_SCTP;
5116                 rule->tuples_mask.ip_proto = 0xFF;
5117                 break;
5118         case TCP_V4_FLOW:
5119         case TCP_V6_FLOW:
5120                 rule->tuples.ip_proto = IPPROTO_TCP;
5121                 rule->tuples_mask.ip_proto = 0xFF;
5122                 break;
5123         case UDP_V4_FLOW:
5124         case UDP_V6_FLOW:
5125                 rule->tuples.ip_proto = IPPROTO_UDP;
5126                 rule->tuples_mask.ip_proto = 0xFF;
5127                 break;
5128         default:
5129                 break;
5130         }
5131
5132         if ((fs->flow_type & FLOW_EXT)) {
5133                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5134                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5135         }
5136
5137         if (fs->flow_type & FLOW_MAC_EXT) {
5138                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5139                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5140         }
5141
5142         return 0;
5143 }
5144
5145 /* must be called with fd_rule_lock held */
5146 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5147                                 struct hclge_fd_rule *rule)
5148 {
5149         int ret;
5150
5151         if (!rule) {
5152                 dev_err(&hdev->pdev->dev,
5153                         "The flow director rule is NULL\n");
5154                 return -EINVAL;
5155         }
5156
5157         /* this cannot fail here, so there is no need to check the return value */
5158         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5159
5160         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5161         if (ret)
5162                 goto clear_rule;
5163
5164         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5165         if (ret)
5166                 goto clear_rule;
5167
5168         return 0;
5169
5170 clear_rule:
5171         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5172         return ret;
5173 }
5174
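/* Add a flow director rule described by an ethtool_rx_flow_spec. As an
 * illustration (device name, addresses and queue are arbitrary), a rule
 * added with
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 dst-port 80 \
 *           action 1 loc 0
 * steers matching packets to queue 1, while "action -1" (RX_CLS_FLOW_DISC)
 * drops them; a VF id may be carried in the upper bits of ring_cookie and
 * is translated into the destination vport below.
 */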
5175 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5176                               struct ethtool_rxnfc *cmd)
5177 {
5178         struct hclge_vport *vport = hclge_get_vport(handle);
5179         struct hclge_dev *hdev = vport->back;
5180         u16 dst_vport_id = 0, q_index = 0;
5181         struct ethtool_rx_flow_spec *fs;
5182         struct hclge_fd_rule *rule;
5183         u32 unused = 0;
5184         u8 action;
5185         int ret;
5186
5187         if (!hnae3_dev_fd_supported(hdev))
5188                 return -EOPNOTSUPP;
5189
5190         if (!hdev->fd_en) {
5191                 dev_warn(&hdev->pdev->dev,
5192                          "Please enable flow director first\n");
5193                 return -EOPNOTSUPP;
5194         }
5195
5196         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5197
5198         ret = hclge_fd_check_spec(hdev, fs, &unused);
5199         if (ret) {
5200                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5201                 return ret;
5202         }
5203
5204         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5205                 action = HCLGE_FD_ACTION_DROP_PACKET;
5206         } else {
5207                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5208                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5209                 u16 tqps;
5210
5211                 if (vf > hdev->num_req_vfs) {
5212                         dev_err(&hdev->pdev->dev,
5213                                 "Error: vf id (%d) > max vf num (%d)\n",
5214                                 vf, hdev->num_req_vfs);
5215                         return -EINVAL;
5216                 }
5217
5218                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5219                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5220
5221                 if (ring >= tqps) {
5222                         dev_err(&hdev->pdev->dev,
5223                                 "Error: queue id (%d) > max tqp num (%d)\n",
5224                                 ring, tqps - 1);
5225                         return -EINVAL;
5226                 }
5227
5228                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5229                 q_index = ring;
5230         }
5231
5232         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5233         if (!rule)
5234                 return -ENOMEM;
5235
5236         ret = hclge_fd_get_tuple(hdev, fs, rule);
5237         if (ret) {
5238                 kfree(rule);
5239                 return ret;
5240         }
5241
5242         rule->flow_type = fs->flow_type;
5243
5244         rule->location = fs->location;
5245         rule->unused_tuple = unused;
5246         rule->vf_id = dst_vport_id;
5247         rule->queue_id = q_index;
5248         rule->action = action;
5249         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5250
5251         /* when the user configures a rule via ethtool, clear all arfs
5252          * rules first to avoid rule conflicts
5253          */
5254         hclge_clear_arfs_rules(handle);
5255
5256         spin_lock_bh(&hdev->fd_rule_lock);
5257         ret = hclge_fd_config_rule(hdev, rule);
5258
5259         spin_unlock_bh(&hdev->fd_rule_lock);
5260
5261         return ret;
5262 }
5263
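/* Delete the rule at fs->location, e.g. via "ethtool -N <dev> delete <loc>".
 * The TCAM entry is invalidated first; the software rule list is only
 * updated when that succeeds.
 */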
5264 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5265                               struct ethtool_rxnfc *cmd)
5266 {
5267         struct hclge_vport *vport = hclge_get_vport(handle);
5268         struct hclge_dev *hdev = vport->back;
5269         struct ethtool_rx_flow_spec *fs;
5270         int ret;
5271
5272         if (!hnae3_dev_fd_supported(hdev))
5273                 return -EOPNOTSUPP;
5274
5275         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5276
5277         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5278                 return -EINVAL;
5279
5280         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5281                 dev_err(&hdev->pdev->dev,
5282                         "Delete fail, rule %d does not exist\n",
5283                         fs->location);
5284                 return -ENOENT;
5285         }
5286
5287         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5288                                    fs->location, NULL, false);
5289         if (ret)
5290                 return ret;
5291
5292         spin_lock_bh(&hdev->fd_rule_lock);
5293         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5294
5295         spin_unlock_bh(&hdev->fd_rule_lock);
5296
5297         return ret;
5298 }
5299
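/* Invalidate every rule currently programmed in hardware (every location
 * set in fd_bmap). When clear_list is true, also free the software rule
 * list and reset the rule counters.
 */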
5300 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5301                                      bool clear_list)
5302 {
5303         struct hclge_vport *vport = hclge_get_vport(handle);
5304         struct hclge_dev *hdev = vport->back;
5305         struct hclge_fd_rule *rule;
5306         struct hlist_node *node;
5307         u16 location;
5308
5309         if (!hnae3_dev_fd_supported(hdev))
5310                 return;
5311
5312         spin_lock_bh(&hdev->fd_rule_lock);
5313         for_each_set_bit(location, hdev->fd_bmap,
5314                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5315                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5316                                      NULL, false);
5317
5318         if (clear_list) {
5319                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5320                                           rule_node) {
5321                         hlist_del(&rule->rule_node);
5322                         kfree(rule);
5323                 }
5324                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5325                 hdev->hclge_fd_rule_num = 0;
5326                 bitmap_zero(hdev->fd_bmap,
5327                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5328         }
5329
5330         spin_unlock_bh(&hdev->fd_rule_lock);
5331 }
5332
5333 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5334 {
5335         struct hclge_vport *vport = hclge_get_vport(handle);
5336         struct hclge_dev *hdev = vport->back;
5337         struct hclge_fd_rule *rule;
5338         struct hlist_node *node;
5339         int ret;
5340
5341         /* Return ok here, because reset error handling will check this
5342          * return value. If an error is returned here, the reset process will
5343          * fail.
5344          */
5345         if (!hnae3_dev_fd_supported(hdev))
5346                 return 0;
5347
5348         /* if fd is disabled, the rules should not be restored during reset */
5349         if (!hdev->fd_en)
5350                 return 0;
5351
5352         spin_lock_bh(&hdev->fd_rule_lock);
5353         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5354                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5355                 if (!ret)
5356                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5357
5358                 if (ret) {
5359                         dev_warn(&hdev->pdev->dev,
5360                                  "Restore rule %d failed, remove it\n",
5361                                  rule->location);
5362                         clear_bit(rule->location, hdev->fd_bmap);
5363                         hlist_del(&rule->rule_node);
5364                         kfree(rule);
5365                         hdev->hclge_fd_rule_num--;
5366                 }
5367         }
5368
5369         if (hdev->hclge_fd_rule_num)
5370                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5371
5372         spin_unlock_bh(&hdev->fd_rule_lock);
5373
5374         return 0;
5375 }
5376
5377 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5378                                  struct ethtool_rxnfc *cmd)
5379 {
5380         struct hclge_vport *vport = hclge_get_vport(handle);
5381         struct hclge_dev *hdev = vport->back;
5382
5383         if (!hnae3_dev_fd_supported(hdev))
5384                 return -EOPNOTSUPP;
5385
5386         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5387         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5388
5389         return 0;
5390 }
5391
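/* Translate the rule stored at fs->location back into an
 * ethtool_rx_flow_spec: values come from rule->tuples and masks from
 * rule->tuples_mask, taking rule->unused_tuple into account.
 */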
5392 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5393                                   struct ethtool_rxnfc *cmd)
5394 {
5395         struct hclge_vport *vport = hclge_get_vport(handle);
5396         struct hclge_fd_rule *rule = NULL;
5397         struct hclge_dev *hdev = vport->back;
5398         struct ethtool_rx_flow_spec *fs;
5399         struct hlist_node *node2;
5400
5401         if (!hnae3_dev_fd_supported(hdev))
5402                 return -EOPNOTSUPP;
5403
5404         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5405
5406         spin_lock_bh(&hdev->fd_rule_lock);
5407
5408         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5409                 if (rule->location >= fs->location)
5410                         break;
5411         }
5412
5413         if (!rule || fs->location != rule->location) {
5414                 spin_unlock_bh(&hdev->fd_rule_lock);
5415
5416                 return -ENOENT;
5417         }
5418
5419         fs->flow_type = rule->flow_type;
5420         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5421         case SCTP_V4_FLOW:
5422         case TCP_V4_FLOW:
5423         case UDP_V4_FLOW:
5424                 fs->h_u.tcp_ip4_spec.ip4src =
5425                                 cpu_to_be32(rule->tuples.src_ip[3]);
5426                 fs->m_u.tcp_ip4_spec.ip4src =
5427                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5428                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5429
5430                 fs->h_u.tcp_ip4_spec.ip4dst =
5431                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5432                 fs->m_u.tcp_ip4_spec.ip4dst =
5433                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5434                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5435
5436                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5437                 fs->m_u.tcp_ip4_spec.psrc =
5438                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5439                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5440
5441                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5442                 fs->m_u.tcp_ip4_spec.pdst =
5443                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5444                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5445
5446                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5447                 fs->m_u.tcp_ip4_spec.tos =
5448                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5449                                 0 : rule->tuples_mask.ip_tos;
5450
5451                 break;
5452         case IP_USER_FLOW:
5453                 fs->h_u.usr_ip4_spec.ip4src =
5454                                 cpu_to_be32(rule->tuples.src_ip[3]);
5455                 fs->m_u.usr_ip4_spec.ip4src =
5456                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5457                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5458
5459                 fs->h_u.usr_ip4_spec.ip4dst =
5460                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5461                 fs->m_u.usr_ip4_spec.ip4dst =
5462                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5463                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5464
5465                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5466                 fs->m_u.usr_ip4_spec.tos =
5467                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5468                                 0 : rule->tuples_mask.ip_tos;
5469
5470                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5471                 fs->m_u.usr_ip4_spec.proto =
5472                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5473                                 0 : rule->tuples_mask.ip_proto;
5474
5475                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5476
5477                 break;
5478         case SCTP_V6_FLOW:
5479         case TCP_V6_FLOW:
5480         case UDP_V6_FLOW:
5481                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5482                                   rule->tuples.src_ip, 4);
5483                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5484                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5485                 else
5486                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5487                                           rule->tuples_mask.src_ip, 4);
5488
5489                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5490                                   rule->tuples.dst_ip, 4);
5491                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5492                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5493                 else
5494                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5495                                           rule->tuples_mask.dst_ip, 4);
5496
5497                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5498                 fs->m_u.tcp_ip6_spec.psrc =
5499                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5500                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5501
5502                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5503                 fs->m_u.tcp_ip6_spec.pdst =
5504                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5505                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5506
5507                 break;
5508         case IPV6_USER_FLOW:
5509                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5510                                   rule->tuples.src_ip, 4);
5511                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5512                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5513                 else
5514                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5515                                           rule->tuples_mask.src_ip, 4);
5516
5517                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5518                                   rule->tuples.dst_ip, 4);
5519                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5520                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5521                 else
5522                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5523                                           rule->tuples_mask.dst_ip, 4);
5524
5525                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5526                 fs->m_u.usr_ip6_spec.l4_proto =
5527                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5528                                 0 : rule->tuples_mask.ip_proto;
5529
5530                 break;
5531         case ETHER_FLOW:
5532                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5533                                 rule->tuples.src_mac);
5534                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5535                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5536                 else
5537                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5538                                         rule->tuples_mask.src_mac);
5539
5540                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5541                                 rule->tuples.dst_mac);
5542                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5543                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5544                 else
5545                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5546                                         rule->tuples_mask.dst_mac);
5547
5548                 fs->h_u.ether_spec.h_proto =
5549                                 cpu_to_be16(rule->tuples.ether_proto);
5550                 fs->m_u.ether_spec.h_proto =
5551                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5552                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5553
5554                 break;
5555         default:
5556                 spin_unlock_bh(&hdev->fd_rule_lock);
5557                 return -EOPNOTSUPP;
5558         }
5559
5560         if (fs->flow_type & FLOW_EXT) {
5561                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5562                 fs->m_ext.vlan_tci =
5563                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5564                                 cpu_to_be16(VLAN_VID_MASK) :
5565                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5566         }
5567
5568         if (fs->flow_type & FLOW_MAC_EXT) {
5569                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5570                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5571                         eth_zero_addr(fs->m_ext.h_dest);
5572                 else
5573                         ether_addr_copy(fs->m_ext.h_dest,
5574                                         rule->tuples_mask.dst_mac);
5575         }
5576
5577         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5578                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5579         } else {
5580                 u64 vf_id;
5581
5582                 fs->ring_cookie = rule->queue_id;
5583                 vf_id = rule->vf_id;
5584                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5585                 fs->ring_cookie |= vf_id;
5586         }
5587
5588         spin_unlock_bh(&hdev->fd_rule_lock);
5589
5590         return 0;
5591 }
5592
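/* Fill @rule_locs with the location of every configured flow director rule
 * and report the total rule capacity in @cmd->data. Returns -EMSGSIZE when
 * the caller's array is smaller than the number of installed rules.
 */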
5593 static int hclge_get_all_rules(struct hnae3_handle *handle,
5594                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5595 {
5596         struct hclge_vport *vport = hclge_get_vport(handle);
5597         struct hclge_dev *hdev = vport->back;
5598         struct hclge_fd_rule *rule;
5599         struct hlist_node *node2;
5600         int cnt = 0;
5601
5602         if (!hnae3_dev_fd_supported(hdev))
5603                 return -EOPNOTSUPP;
5604
5605         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5606
5607         spin_lock_bh(&hdev->fd_rule_lock);
5608         hlist_for_each_entry_safe(rule, node2,
5609                                   &hdev->fd_rule_list, rule_node) {
5610                 if (cnt == cmd->rule_cnt) {
5611                         spin_unlock_bh(&hdev->fd_rule_lock);
5612                         return -EMSGSIZE;
5613                 }
5614
5615                 rule_locs[cnt] = rule->location;
5616                 cnt++;
5617         }
5618
5619         spin_unlock_bh(&hdev->fd_rule_lock);
5620
5621         cmd->rule_cnt = cnt;
5622
5623         return 0;
5624 }
5625
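/* Convert the flow_keys extracted by the flow dissector into the driver's
 * tuple layout. For IPv4 only the last word of the src/dst ip arrays is
 * used; for IPv6 all four words are copied.
 */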
5626 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5627                                      struct hclge_fd_rule_tuples *tuples)
5628 {
5629         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5630         tuples->ip_proto = fkeys->basic.ip_proto;
5631         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5632
5633         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5634                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5635                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5636         } else {
5637                 memcpy(tuples->src_ip,
5638                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5639                        sizeof(tuples->src_ip));
5640                 memcpy(tuples->dst_ip,
5641                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5642                        sizeof(tuples->dst_ip));
5643         }
5644 }
5645
5646 /* traverse all rules, check whether an existing rule has the same tuples */
5647 static struct hclge_fd_rule *
5648 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5649                           const struct hclge_fd_rule_tuples *tuples)
5650 {
5651         struct hclge_fd_rule *rule = NULL;
5652         struct hlist_node *node;
5653
5654         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5655                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5656                         return rule;
5657         }
5658
5659         return NULL;
5660 }
5661
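/* Build an aRFS rule from the dissected tuples: only the ip addresses,
 * destination port and ip protocol are matched, the remaining tuples are
 * marked unused and the rule is tagged as HCLGE_FD_ARFS_ACTIVE.
 */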
5662 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5663                                      struct hclge_fd_rule *rule)
5664 {
5665         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5666                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5667                              BIT(INNER_SRC_PORT);
5668         rule->action = 0;
5669         rule->vf_id = 0;
5670         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5671         if (tuples->ether_proto == ETH_P_IP) {
5672                 if (tuples->ip_proto == IPPROTO_TCP)
5673                         rule->flow_type = TCP_V4_FLOW;
5674                 else
5675                         rule->flow_type = UDP_V4_FLOW;
5676         } else {
5677                 if (tuples->ip_proto == IPPROTO_TCP)
5678                         rule->flow_type = TCP_V6_FLOW;
5679                 else
5680                         rule->flow_type = UDP_V6_FLOW;
5681         }
5682         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5683         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5684 }
5685
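/* aRFS flow steering entry point: refuse to steer while ethtool rules are
 * active, otherwise create a rule for the new flow or retarget an existing
 * one to @queue_id. Returns the rule location on success; it is used as the
 * filter id when hclge_rfs_filter_expire() checks for expiry.
 */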
5686 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5687                                       u16 flow_id, struct flow_keys *fkeys)
5688 {
5689         struct hclge_vport *vport = hclge_get_vport(handle);
5690         struct hclge_fd_rule_tuples new_tuples;
5691         struct hclge_dev *hdev = vport->back;
5692         struct hclge_fd_rule *rule;
5693         u16 tmp_queue_id;
5694         u16 bit_id;
5695         int ret;
5696
5697         if (!hnae3_dev_fd_supported(hdev))
5698                 return -EOPNOTSUPP;
5699
5700         memset(&new_tuples, 0, sizeof(new_tuples));
5701         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5702
5703         spin_lock_bh(&hdev->fd_rule_lock);
5704
5705         /* when there is already an fd rule added by the user,
5706          * arfs should not work
5707          */
5708         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5709                 spin_unlock_bh(&hdev->fd_rule_lock);
5710
5711                 return -EOPNOTSUPP;
5712         }
5713
5714         /* check whether a flow director filter already exists for this flow:
5715          * if not, create a new filter for it;
5716          * if a filter exists with a different queue id, modify the filter;
5717          * if a filter exists with the same queue id, do nothing
5718          */
5719         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5720         if (!rule) {
5721                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5722                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5723                         spin_unlock_bh(&hdev->fd_rule_lock);
5724
5725                         return -ENOSPC;
5726                 }
5727
5728                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5729                 if (!rule) {
5730                         spin_unlock_bh(&hdev->fd_rule_lock);
5731
5732                         return -ENOMEM;
5733                 }
5734
5735                 set_bit(bit_id, hdev->fd_bmap);
5736                 rule->location = bit_id;
5737                 rule->flow_id = flow_id;
5738                 rule->queue_id = queue_id;
5739                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5740                 ret = hclge_fd_config_rule(hdev, rule);
5741
5742                 spin_unlock_bh(&hdev->fd_rule_lock);
5743
5744                 if (ret)
5745                         return ret;
5746
5747                 return rule->location;
5748         }
5749
5750         spin_unlock_bh(&hdev->fd_rule_lock);
5751
5752         if (rule->queue_id == queue_id)
5753                 return rule->location;
5754
5755         tmp_queue_id = rule->queue_id;
5756         rule->queue_id = queue_id;
5757         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5758         if (ret) {
5759                 rule->queue_id = tmp_queue_id;
5760                 return ret;
5761         }
5762
5763         return rule->location;
5764 }
5765
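/* Periodically expire aRFS rules: under fd_rule_lock, move every rule that
 * rps_may_expire_flow() reports as idle onto a private list, then release
 * the lock and remove those rules from the TCAM before freeing them.
 */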
5766 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5767 {
5768 #ifdef CONFIG_RFS_ACCEL
5769         struct hnae3_handle *handle = &hdev->vport[0].nic;
5770         struct hclge_fd_rule *rule;
5771         struct hlist_node *node;
5772         HLIST_HEAD(del_list);
5773
5774         spin_lock_bh(&hdev->fd_rule_lock);
5775         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5776                 spin_unlock_bh(&hdev->fd_rule_lock);
5777                 return;
5778         }
5779         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5780                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5781                                         rule->flow_id, rule->location)) {
5782                         hlist_del_init(&rule->rule_node);
5783                         hlist_add_head(&rule->rule_node, &del_list);
5784                         hdev->hclge_fd_rule_num--;
5785                         clear_bit(rule->location, hdev->fd_bmap);
5786                 }
5787         }
5788         spin_unlock_bh(&hdev->fd_rule_lock);
5789
5790         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5791                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5792                                      rule->location, NULL, false);
5793                 kfree(rule);
5794         }
5795 #endif
5796 }
5797
5798 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5799 {
5800 #ifdef CONFIG_RFS_ACCEL
5801         struct hclge_vport *vport = hclge_get_vport(handle);
5802         struct hclge_dev *hdev = vport->back;
5803
5804         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5805                 hclge_del_all_fd_entries(handle, true);
5806 #endif
5807 }
5808
5809 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5810 {
5811         struct hclge_vport *vport = hclge_get_vport(handle);
5812         struct hclge_dev *hdev = vport->back;
5813
5814         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5815                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5816 }
5817
5818 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5819 {
5820         struct hclge_vport *vport = hclge_get_vport(handle);
5821         struct hclge_dev *hdev = vport->back;
5822
5823         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5824 }
5825
5826 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5827 {
5828         struct hclge_vport *vport = hclge_get_vport(handle);
5829         struct hclge_dev *hdev = vport->back;
5830
5831         return hdev->rst_stats.hw_reset_done_cnt;
5832 }
5833
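/* Enable or disable flow director from the client. Disabling removes all
 * rules from hardware; aRFS rules are also dropped from the local list,
 * while ethtool rules are kept so they can be restored when re-enabled.
 */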
5834 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5835 {
5836         struct hclge_vport *vport = hclge_get_vport(handle);
5837         struct hclge_dev *hdev = vport->back;
5838         bool clear;
5839
5840         hdev->fd_en = enable;
5841         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5842         if (!enable)
5843                 hclge_del_all_fd_entries(handle, clear);
5844         else
5845                 hclge_restore_fd_entries(handle);
5846 }
5847
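/* Globally enable or disable the MAC: TX/RX, padding, FCS insertion and
 * stripping, and oversize truncation are all switched together, while the
 * 1588 and loopback bits are always cleared.
 */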
5848 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5849 {
5850         struct hclge_desc desc;
5851         struct hclge_config_mac_mode_cmd *req =
5852                 (struct hclge_config_mac_mode_cmd *)desc.data;
5853         u32 loop_en = 0;
5854         int ret;
5855
5856         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5857         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5858         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5859         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5860         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5861         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5862         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5863         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5864         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5867         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5868         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5869         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5870         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5871         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5872
5873         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5874         if (ret)
5875                 dev_err(&hdev->pdev->dev,
5876                         "mac enable fail, ret =%d.\n", ret);
5877 }
5878
5879 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5880 {
5881         struct hclge_config_mac_mode_cmd *req;
5882         struct hclge_desc desc;
5883         u32 loop_en;
5884         int ret;
5885
5886         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5887         /* 1 Read out the MAC mode config at first */
5888         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5889         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5890         if (ret) {
5891                 dev_err(&hdev->pdev->dev,
5892                         "mac loopback get fail, ret =%d.\n", ret);
5893                 return ret;
5894         }
5895
5896         /* 2 Then setup the loopback flag */
5897         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5898         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5899         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5900         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5901
5902         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5903
5904         /* 3 Config mac work mode with loopback flag
5905          * and its original configure parameters
5906          */
5907         hclge_cmd_reuse_desc(&desc, false);
5908         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5909         if (ret)
5910                 dev_err(&hdev->pdev->dev,
5911                         "mac loopback set fail, ret =%d.\n", ret);
5912         return ret;
5913 }
5914
5915 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5916                                      enum hnae3_loop loop_mode)
5917 {
5918 #define HCLGE_SERDES_RETRY_MS   10
5919 #define HCLGE_SERDES_RETRY_NUM  100
5920
5921 #define HCLGE_MAC_LINK_STATUS_MS   10
5922 #define HCLGE_MAC_LINK_STATUS_NUM  100
5923 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5924 #define HCLGE_MAC_LINK_STATUS_UP   1
5925
5926         struct hclge_serdes_lb_cmd *req;
5927         struct hclge_desc desc;
5928         int mac_link_ret = 0;
5929         int ret, i = 0;
5930         u8 loop_mode_b;
5931
5932         req = (struct hclge_serdes_lb_cmd *)desc.data;
5933         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5934
5935         switch (loop_mode) {
5936         case HNAE3_LOOP_SERIAL_SERDES:
5937                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5938                 break;
5939         case HNAE3_LOOP_PARALLEL_SERDES:
5940                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5941                 break;
5942         default:
5943                 dev_err(&hdev->pdev->dev,
5944                         "unsupported serdes loopback mode %d\n", loop_mode);
5945                 return -EOPNOTSUPP;
5946         }
5947
5948         if (en) {
5949                 req->enable = loop_mode_b;
5950                 req->mask = loop_mode_b;
5951                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5952         } else {
5953                 req->mask = loop_mode_b;
5954                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5955         }
5956
5957         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5958         if (ret) {
5959                 dev_err(&hdev->pdev->dev,
5960                         "serdes loopback set fail, ret = %d\n", ret);
5961                 return ret;
5962         }
5963
5964         do {
5965                 msleep(HCLGE_SERDES_RETRY_MS);
5966                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5967                                            true);
5968                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5969                 if (ret) {
5970                         dev_err(&hdev->pdev->dev,
5971                                 "serdes loopback get fail, ret = %d\n", ret);
5972                         return ret;
5973                 }
5974         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5975                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5976
5977         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5978                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5979                 return -EBUSY;
5980         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5981                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5982                 return -EIO;
5983         }
5984
5985         hclge_cfg_mac_mode(hdev, en);
5986
5987         i = 0;
5988         do {
5989                 /* serdes internal loopback, independent of the network cable. */
5990                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5991                 ret = hclge_get_mac_link_status(hdev);
5992                 if (ret == mac_link_ret)
5993                         return 0;
5994         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5995
5996         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5997
5998         return -EBUSY;
5999 }
6000
6001 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6002                             int stream_id, bool enable)
6003 {
6004         struct hclge_desc desc;
6005         struct hclge_cfg_com_tqp_queue_cmd *req =
6006                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6007         int ret;
6008
6009         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6010         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6011         req->stream_id = cpu_to_le16(stream_id);
6012         req->enable |= enable << HCLGE_TQP_ENABLE_B;
6013
6014         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6015         if (ret)
6016                 dev_err(&hdev->pdev->dev,
6017                         "Tqp enable fail, status =%d.\n", ret);
6018         return ret;
6019 }
6020
6021 static int hclge_set_loopback(struct hnae3_handle *handle,
6022                               enum hnae3_loop loop_mode, bool en)
6023 {
6024         struct hclge_vport *vport = hclge_get_vport(handle);
6025         struct hnae3_knic_private_info *kinfo;
6026         struct hclge_dev *hdev = vport->back;
6027         int i, ret;
6028
6029         switch (loop_mode) {
6030         case HNAE3_LOOP_APP:
6031                 ret = hclge_set_app_loopback(hdev, en);
6032                 break;
6033         case HNAE3_LOOP_SERIAL_SERDES:
6034         case HNAE3_LOOP_PARALLEL_SERDES:
6035                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6036                 break;
6037         default:
6038                 ret = -EOPNOTSUPP;
6039                 dev_err(&hdev->pdev->dev,
6040                         "loop_mode %d is not supported\n", loop_mode);
6041                 break;
6042         }
6043
6044         if (ret)
6045                 return ret;
6046
6047         kinfo = &vport->nic.kinfo;
6048         for (i = 0; i < kinfo->num_tqps; i++) {
6049                 ret = hclge_tqp_enable(hdev, i, 0, en);
6050                 if (ret)
6051                         return ret;
6052         }
6053
6054         return 0;
6055 }
6056
6057 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6058 {
6059         struct hclge_vport *vport = hclge_get_vport(handle);
6060         struct hnae3_knic_private_info *kinfo;
6061         struct hnae3_queue *queue;
6062         struct hclge_tqp *tqp;
6063         int i;
6064
6065         kinfo = &vport->nic.kinfo;
6066         for (i = 0; i < kinfo->num_tqps; i++) {
6067                 queue = handle->kinfo.tqp[i];
6068                 tqp = container_of(queue, struct hclge_tqp, q);
6069                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6070         }
6071 }
6072
6073 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6074 {
6075         struct hclge_vport *vport = hclge_get_vport(handle);
6076         struct hclge_dev *hdev = vport->back;
6077
6078         if (enable) {
6079                 mod_timer(&hdev->service_timer, jiffies + HZ);
6080         } else {
6081                 del_timer_sync(&hdev->service_timer);
6082                 cancel_work_sync(&hdev->service_task);
6083                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6084         }
6085 }
6086
6087 static int hclge_ae_start(struct hnae3_handle *handle)
6088 {
6089         struct hclge_vport *vport = hclge_get_vport(handle);
6090         struct hclge_dev *hdev = vport->back;
6091
6092         /* mac enable */
6093         hclge_cfg_mac_mode(hdev, true);
6094         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6095         hdev->hw.mac.link = 0;
6096
6097         /* reset tqp stats */
6098         hclge_reset_tqp_stats(handle);
6099
6100         hclge_mac_start_phy(hdev);
6101
6102         return 0;
6103 }
6104
6105 static void hclge_ae_stop(struct hnae3_handle *handle)
6106 {
6107         struct hclge_vport *vport = hclge_get_vport(handle);
6108         struct hclge_dev *hdev = vport->back;
6109         int i;
6110
6111         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6112
6113         hclge_clear_arfs_rules(handle);
6114
6115         /* If it is not a PF reset, the firmware will disable the MAC,
6116          * so it only needs to stop the phy here.
6117          */
6118         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6119             hdev->reset_type != HNAE3_FUNC_RESET) {
6120                 hclge_mac_stop_phy(hdev);
6121                 return;
6122         }
6123
6124         for (i = 0; i < handle->kinfo.num_tqps; i++)
6125                 hclge_reset_tqp(handle, i);
6126
6127         /* Mac disable */
6128         hclge_cfg_mac_mode(hdev, false);
6129
6130         hclge_mac_stop_phy(hdev);
6131
6132         /* reset tqp stats */
6133         hclge_reset_tqp_stats(handle);
6134         hclge_update_link_status(hdev);
6135 }
6136
6137 int hclge_vport_start(struct hclge_vport *vport)
6138 {
6139         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6140         vport->last_active_jiffies = jiffies;
6141         return 0;
6142 }
6143
6144 void hclge_vport_stop(struct hclge_vport *vport)
6145 {
6146         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6147 }
6148
6149 static int hclge_client_start(struct hnae3_handle *handle)
6150 {
6151         struct hclge_vport *vport = hclge_get_vport(handle);
6152
6153         return hclge_vport_start(vport);
6154 }
6155
6156 static void hclge_client_stop(struct hnae3_handle *handle)
6157 {
6158         struct hclge_vport *vport = hclge_get_vport(handle);
6159
6160         hclge_vport_stop(vport);
6161 }
6162
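/* Translate the command-queue completion status and the mac_vlan response
 * code into an errno. For ADD, code 2/3 means the unicast/multicast table
 * overflowed (-ENOSPC); for REMOVE and LOOKUP, code 1 means the entry was
 * not found (-ENOENT).
 */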
6163 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6164                                          u16 cmdq_resp, u8  resp_code,
6165                                          enum hclge_mac_vlan_tbl_opcode op)
6166 {
6167         struct hclge_dev *hdev = vport->back;
6168         int return_status = -EIO;
6169
6170         if (cmdq_resp) {
6171                 dev_err(&hdev->pdev->dev,
6172                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6173                         cmdq_resp);
6174                 return -EIO;
6175         }
6176
6177         if (op == HCLGE_MAC_VLAN_ADD) {
6178                 if ((!resp_code) || (resp_code == 1)) {
6179                         return_status = 0;
6180                 } else if (resp_code == 2) {
6181                         return_status = -ENOSPC;
6182                         dev_err(&hdev->pdev->dev,
6183                                 "add mac addr failed for uc_overflow.\n");
6184                 } else if (resp_code == 3) {
6185                         return_status = -ENOSPC;
6186                         dev_err(&hdev->pdev->dev,
6187                                 "add mac addr failed for mc_overflow.\n");
6188                 } else {
6189                         dev_err(&hdev->pdev->dev,
6190                                 "add mac addr failed for undefined, code=%d.\n",
6191                                 resp_code);
6192                 }
6193         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6194                 if (!resp_code) {
6195                         return_status = 0;
6196                 } else if (resp_code == 1) {
6197                         return_status = -ENOENT;
6198                         dev_dbg(&hdev->pdev->dev,
6199                                 "remove mac addr failed for miss.\n");
6200                 } else {
6201                         dev_err(&hdev->pdev->dev,
6202                                 "remove mac addr failed for undefined, code=%d.\n",
6203                                 resp_code);
6204                 }
6205         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6206                 if (!resp_code) {
6207                         return_status = 0;
6208                 } else if (resp_code == 1) {
6209                         return_status = -ENOENT;
6210                         dev_dbg(&hdev->pdev->dev,
6211                                 "lookup mac addr failed for miss.\n");
6212                 } else {
6213                         dev_err(&hdev->pdev->dev,
6214                                 "lookup mac addr failed for undefined, code=%d.\n",
6215                                 resp_code);
6216                 }
6217         } else {
6218                 return_status = -EINVAL;
6219                 dev_err(&hdev->pdev->dev,
6220                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6221                         op);
6222         }
6223
6224         return return_status;
6225 }
6226
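/* Set or clear the bit for @vfid in the mac_vlan entry's function bitmap.
 * Function ids 0-191 live in desc[1] and 192-255 in desc[2], 32 ids per
 * 32-bit data word.
 */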
6227 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6228 {
6229         int word_num;
6230         int bit_num;
6231
6232         if (vfid > 255 || vfid < 0)
6233                 return -EIO;
6234
6235         if (vfid >= 0 && vfid <= 191) {
6236                 word_num = vfid / 32;
6237                 bit_num  = vfid % 32;
6238                 if (clr)
6239                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6240                 else
6241                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6242         } else {
6243                 word_num = (vfid - 192) / 32;
6244                 bit_num  = vfid % 32;
6245                 if (clr)
6246                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6247                 else
6248                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6249         }
6250
6251         return 0;
6252 }
6253
6254 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6255 {
6256 #define HCLGE_DESC_NUMBER 3
6257 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6258         int i, j;
6259
6260         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6261                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6262                         if (desc[i].data[j])
6263                                 return false;
6264
6265         return true;
6266 }
6267
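/* Pack a mac address into the mac_vlan table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, little-endian within each
 * field. For multicast entries the corresponding entry-type bits are also set.
 */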
6268 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6269                                    const u8 *addr, bool is_mc)
6270 {
6271         const unsigned char *mac_addr = addr;
6272         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
6273                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
6274         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6275
6276         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6277         if (is_mc) {
6278                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6279                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6280         }
6281
6282         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6283         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6284 }
6285
6286 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6287                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6288 {
6289         struct hclge_dev *hdev = vport->back;
6290         struct hclge_desc desc;
6291         u8 resp_code;
6292         u16 retval;
6293         int ret;
6294
6295         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6296
6297         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6298
6299         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6300         if (ret) {
6301                 dev_err(&hdev->pdev->dev,
6302                         "del mac addr failed for cmd_send, ret =%d.\n",
6303                         ret);
6304                 return ret;
6305         }
6306         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6307         retval = le16_to_cpu(desc.retval);
6308
6309         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6310                                              HCLGE_MAC_VLAN_REMOVE);
6311 }
6312
6313 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6314                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6315                                      struct hclge_desc *desc,
6316                                      bool is_mc)
6317 {
6318         struct hclge_dev *hdev = vport->back;
6319         u8 resp_code;
6320         u16 retval;
6321         int ret;
6322
6323         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6324         if (is_mc) {
6325                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6326                 memcpy(desc[0].data,
6327                        req,
6328                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6329                 hclge_cmd_setup_basic_desc(&desc[1],
6330                                            HCLGE_OPC_MAC_VLAN_ADD,
6331                                            true);
6332                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6333                 hclge_cmd_setup_basic_desc(&desc[2],
6334                                            HCLGE_OPC_MAC_VLAN_ADD,
6335                                            true);
6336                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6337         } else {
6338                 memcpy(desc[0].data,
6339                        req,
6340                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6341                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6342         }
6343         if (ret) {
6344                 dev_err(&hdev->pdev->dev,
6345                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6346                         ret);
6347                 return ret;
6348         }
6349         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6350         retval = le16_to_cpu(desc[0].retval);
6351
6352         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6353                                              HCLGE_MAC_VLAN_LKUP);
6354 }
6355
6356 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6357                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6358                                   struct hclge_desc *mc_desc)
6359 {
6360         struct hclge_dev *hdev = vport->back;
6361         int cfg_status;
6362         u8 resp_code;
6363         u16 retval;
6364         int ret;
6365
6366         if (!mc_desc) {
6367                 struct hclge_desc desc;
6368
6369                 hclge_cmd_setup_basic_desc(&desc,
6370                                            HCLGE_OPC_MAC_VLAN_ADD,
6371                                            false);
6372                 memcpy(desc.data, req,
6373                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6374                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6375                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6376                 retval = le16_to_cpu(desc.retval);
6377
6378                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6379                                                            resp_code,
6380                                                            HCLGE_MAC_VLAN_ADD);
6381         } else {
6382                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6383                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6384                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6385                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6386                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6387                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6388                 memcpy(mc_desc[0].data, req,
6389                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6390                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6391                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6392                 retval = le16_to_cpu(mc_desc[0].retval);
6393
6394                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6395                                                            resp_code,
6396                                                            HCLGE_MAC_VLAN_ADD);
6397         }
6398
6399         if (ret) {
6400                 dev_err(&hdev->pdev->dev,
6401                         "add mac addr failed for cmd_send, ret =%d.\n",
6402                         ret);
6403                 return ret;
6404         }
6405
6406         return cfg_status;
6407 }
6408
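/* Allocate unicast mac_vlan (umv) table space from firmware. The space is
 * divided by (num_req_vfs + 2): each function (PF and VFs) gets one share as
 * its private quota, and the extra share plus the division remainder form a
 * shared pool that is consumed once a function's private quota is used up.
 */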
6409 static int hclge_init_umv_space(struct hclge_dev *hdev)
6410 {
6411         u16 allocated_size = 0;
6412         int ret;
6413
6414         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6415                                   true);
6416         if (ret)
6417                 return ret;
6418
6419         if (allocated_size < hdev->wanted_umv_size)
6420                 dev_warn(&hdev->pdev->dev,
6421                          "Alloc umv space failed, want %d, get %d\n",
6422                          hdev->wanted_umv_size, allocated_size);
6423
6424         mutex_init(&hdev->umv_mutex);
6425         hdev->max_umv_size = allocated_size;
6426         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6427         hdev->share_umv_size = hdev->priv_umv_size +
6428                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6429
6430         return 0;
6431 }
6432
6433 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6434 {
6435         int ret;
6436
6437         if (hdev->max_umv_size > 0) {
6438                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6439                                           false);
6440                 if (ret)
6441                         return ret;
6442                 hdev->max_umv_size = 0;
6443         }
6444         mutex_destroy(&hdev->umv_mutex);
6445
6446         return 0;
6447 }
6448
6449 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6450                                u16 *allocated_size, bool is_alloc)
6451 {
6452         struct hclge_umv_spc_alc_cmd *req;
6453         struct hclge_desc desc;
6454         int ret;
6455
6456         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6457         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6458         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6459         req->space_size = cpu_to_le32(space_size);
6460
6461         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6462         if (ret) {
6463                 dev_err(&hdev->pdev->dev,
6464                         "%s umv space failed for cmd_send, ret =%d\n",
6465                         is_alloc ? "allocate" : "free", ret);
6466                 return ret;
6467         }
6468
6469         if (is_alloc && allocated_size)
6470                 *allocated_size = le32_to_cpu(desc.data[1]);
6471
6472         return 0;
6473 }
6474
6475 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6476 {
6477         struct hclge_vport *vport;
6478         int i;
6479
6480         for (i = 0; i < hdev->num_alloc_vport; i++) {
6481                 vport = &hdev->vport[i];
6482                 vport->used_umv_num = 0;
6483         }
6484
6485         mutex_lock(&hdev->umv_mutex);
6486         hdev->share_umv_size = hdev->priv_umv_size +
6487                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6488         mutex_unlock(&hdev->umv_mutex);
6489 }
6490
6491 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6492 {
6493         struct hclge_dev *hdev = vport->back;
6494         bool is_full;
6495
6496         mutex_lock(&hdev->umv_mutex);
6497         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6498                    hdev->share_umv_size == 0);
6499         mutex_unlock(&hdev->umv_mutex);
6500
6501         return is_full;
6502 }
6503
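/* Account for one unicast entry being added to or removed from @vport:
 * additions beyond the private quota are charged to the shared pool, and
 * frees are credited back in the same order.
 */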
6504 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6505 {
6506         struct hclge_dev *hdev = vport->back;
6507
6508         mutex_lock(&hdev->umv_mutex);
6509         if (is_free) {
6510                 if (vport->used_umv_num > hdev->priv_umv_size)
6511                         hdev->share_umv_size++;
6512
6513                 if (vport->used_umv_num > 0)
6514                         vport->used_umv_num--;
6515         } else {
6516                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6517                     hdev->share_umv_size > 0)
6518                         hdev->share_umv_size--;
6519                 vport->used_umv_num++;
6520         }
6521         mutex_unlock(&hdev->umv_mutex);
6522 }
6523
6524 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6525                              const unsigned char *addr)
6526 {
6527         struct hclge_vport *vport = hclge_get_vport(handle);
6528
6529         return hclge_add_uc_addr_common(vport, addr);
6530 }
6531
6532 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6533                              const unsigned char *addr)
6534 {
6535         struct hclge_dev *hdev = vport->back;
6536         struct hclge_mac_vlan_tbl_entry_cmd req;
6537         struct hclge_desc desc;
6538         u16 egress_port = 0;
6539         int ret;
6540
6541         /* mac addr check */
6542         if (is_zero_ether_addr(addr) ||
6543             is_broadcast_ether_addr(addr) ||
6544             is_multicast_ether_addr(addr)) {
6545                 dev_err(&hdev->pdev->dev,
6546                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6547                          addr,
6548                          is_zero_ether_addr(addr),
6549                          is_broadcast_ether_addr(addr),
6550                          is_multicast_ether_addr(addr));
6551                 return -EINVAL;
6552         }
6553
6554         memset(&req, 0, sizeof(req));
6555
6556         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6557                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6558
6559         req.egress_port = cpu_to_le16(egress_port);
6560
6561         hclge_prepare_mac_addr(&req, addr, false);
6562
6563         /* Look up the mac address in the mac_vlan table, and add
6564          * it if the entry does not exist. Duplicate unicast entries
6565          * are not allowed in the mac_vlan table.
6566          */
6567         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6568         if (ret == -ENOENT) {
6569                 if (!hclge_is_umv_space_full(vport)) {
6570                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6571                         if (!ret)
6572                                 hclge_update_umv_space(vport, false);
6573                         return ret;
6574                 }
6575
6576                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6577                         hdev->priv_umv_size);
6578
6579                 return -ENOSPC;
6580         }
6581
6582         /* check if we just hit the duplicate */
6583         if (!ret) {
6584                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6585                          vport->vport_id, addr);
6586                 return 0;
6587         }
6588
6589         dev_err(&hdev->pdev->dev,
6590                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6591                 addr);
6592
6593         return ret;
6594 }
6595
6596 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6597                             const unsigned char *addr)
6598 {
6599         struct hclge_vport *vport = hclge_get_vport(handle);
6600
6601         return hclge_rm_uc_addr_common(vport, addr);
6602 }
6603
6604 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6605                             const unsigned char *addr)
6606 {
6607         struct hclge_dev *hdev = vport->back;
6608         struct hclge_mac_vlan_tbl_entry_cmd req;
6609         int ret;
6610
6611         /* mac addr check */
6612         if (is_zero_ether_addr(addr) ||
6613             is_broadcast_ether_addr(addr) ||
6614             is_multicast_ether_addr(addr)) {
6615                 dev_dbg(&hdev->pdev->dev,
6616                         "Remove mac err! invalid mac:%pM.\n",
6617                          addr);
6618                 return -EINVAL;
6619         }
6620
6621         memset(&req, 0, sizeof(req));
6622         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6623         hclge_prepare_mac_addr(&req, addr, false);
6624         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6625         if (!ret)
6626                 hclge_update_umv_space(vport, true);
6627
6628         return ret;
6629 }
6630
6631 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6632                              const unsigned char *addr)
6633 {
6634         struct hclge_vport *vport = hclge_get_vport(handle);
6635
6636         return hclge_add_mc_addr_common(vport, addr);
6637 }
6638
6639 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6640                              const unsigned char *addr)
6641 {
6642         struct hclge_dev *hdev = vport->back;
6643         struct hclge_mac_vlan_tbl_entry_cmd req;
6644         struct hclge_desc desc[3];
6645         int status;
6646
6647         /* mac addr check */
6648         if (!is_multicast_ether_addr(addr)) {
6649                 dev_err(&hdev->pdev->dev,
6650                         "Add mc mac err! invalid mac:%pM.\n",
6651                          addr);
6652                 return -EINVAL;
6653         }
6654         memset(&req, 0, sizeof(req));
6655         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6656         hclge_prepare_mac_addr(&req, addr, true);
6657         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6658         if (!status) {
6659                 /* This mac addr exist, update VFID for it */
6660                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6661                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6662         } else {
6663                 /* This mac addr do not exist, add new entry for it */
6664                 memset(desc[0].data, 0, sizeof(desc[0].data));
6665                 memset(desc[1].data, 0, sizeof(desc[0].data));
6666                 memset(desc[2].data, 0, sizeof(desc[0].data));
6667                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6668                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6669         }
6670
6671         if (status == -ENOSPC)
6672                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6673
6674         return status;
6675 }
6676
6677 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6678                             const unsigned char *addr)
6679 {
6680         struct hclge_vport *vport = hclge_get_vport(handle);
6681
6682         return hclge_rm_mc_addr_common(vport, addr);
6683 }
6684
6685 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6686                             const unsigned char *addr)
6687 {
6688         struct hclge_dev *hdev = vport->back;
6689         struct hclge_mac_vlan_tbl_entry_cmd req;
6690         enum hclge_cmd_status status;
6691         struct hclge_desc desc[3];
6692
6693         /* mac addr check */
6694         if (!is_multicast_ether_addr(addr)) {
6695                 dev_dbg(&hdev->pdev->dev,
6696                         "Remove mc mac err! invalid mac:%pM.\n",
6697                          addr);
6698                 return -EINVAL;
6699         }
6700
6701         memset(&req, 0, sizeof(req));
6702         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6703         hclge_prepare_mac_addr(&req, addr, true);
6704         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6705         if (!status) {
6706                 /* This mac addr exist, remove this handle's VFID for it */
6707                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6708
6709                 if (hclge_is_all_function_id_zero(desc))
6710                         /* All the vfid is zero, so need to delete this entry */
6711                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6712                 else
6713                         /* Not all the vfid is zero, update the vfid */
6714                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6715
6716         } else {
6717                 /* This mac address may be in the mta table, but it cannot be
6718                  * deleted here because an mta entry represents an address
6719                  * range rather than a specific address. The delete action on
6720                  * all entries will take effect in update_mta_status, called by
6721                  * hns3_nic_set_rx_mode.
6722                  */
6723                 status = 0;
6724         }
6725
6726         return status;
6727 }
6728
6729 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6730                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6731 {
6732         struct hclge_vport_mac_addr_cfg *mac_cfg;
6733         struct list_head *list;
6734
6735         if (!vport->vport_id)
6736                 return;
6737
6738         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6739         if (!mac_cfg)
6740                 return;
6741
6742         mac_cfg->hd_tbl_status = true;
6743         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6744
6745         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6746                &vport->uc_mac_list : &vport->mc_mac_list;
6747
6748         list_add_tail(&mac_cfg->node, list);
6749 }
6750
6751 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6752                               bool is_write_tbl,
6753                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6754 {
6755         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6756         struct list_head *list;
6757         bool uc_flag, mc_flag;
6758
6759         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6760                &vport->uc_mac_list : &vport->mc_mac_list;
6761
6762         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6763         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6764
6765         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6766                 if (!memcmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN)) {
6767                         if (uc_flag && mac_cfg->hd_tbl_status)
6768                                 hclge_rm_uc_addr_common(vport, mac_addr);
6769
6770                         if (mc_flag && mac_cfg->hd_tbl_status)
6771                                 hclge_rm_mc_addr_common(vport, mac_addr);
6772
6773                         list_del(&mac_cfg->node);
6774                         kfree(mac_cfg);
6775                         break;
6776                 }
6777         }
6778 }
6779
6780 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6781                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6782 {
6783         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6784         struct list_head *list;
6785
6786         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6787                &vport->uc_mac_list : &vport->mc_mac_list;
6788
6789         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6790                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6791                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6792
6793                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6794                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6795
6796                 mac_cfg->hd_tbl_status = false;
6797                 if (is_del_list) {
6798                         list_del(&mac_cfg->node);
6799                         kfree(mac_cfg);
6800                 }
6801         }
6802 }
6803
6804 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6805 {
6806         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6807         struct hclge_vport *vport;
6808         int i;
6809
6810         mutex_lock(&hdev->vport_cfg_mutex);
6811         for (i = 0; i < hdev->num_alloc_vport; i++) {
6812                 vport = &hdev->vport[i];
6813                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6814                         list_del(&mac->node);
6815                         kfree(mac);
6816                 }
6817
6818                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6819                         list_del(&mac->node);
6820                         kfree(mac);
6821                 }
6822         }
6823         mutex_unlock(&hdev->vport_cfg_mutex);
6824 }
6825
6826 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6827                                               u16 cmdq_resp, u8 resp_code)
6828 {
6829 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6830 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6831 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6832 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6833
6834         int return_status;
6835
6836         if (cmdq_resp) {
6837                 dev_err(&hdev->pdev->dev,
6838                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6839                         cmdq_resp);
6840                 return -EIO;
6841         }
6842
6843         switch (resp_code) {
6844         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6845         case HCLGE_ETHERTYPE_ALREADY_ADD:
6846                 return_status = 0;
6847                 break;
6848         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6849                 dev_err(&hdev->pdev->dev,
6850                         "add mac ethertype failed for manager table overflow.\n");
6851                 return_status = -EIO;
6852                 break;
6853         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6854                 dev_err(&hdev->pdev->dev,
6855                         "add mac ethertype failed for key conflict.\n");
6856                 return_status = -EIO;
6857                 break;
6858         default:
6859                 dev_err(&hdev->pdev->dev,
6860                         "add mac ethertype failed for undefined, code=%d.\n",
6861                         resp_code);
6862                 return_status = -EIO;
6863         }
6864
6865         return return_status;
6866 }
6867
6868 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6869                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6870 {
6871         struct hclge_desc desc;
6872         u8 resp_code;
6873         u16 retval;
6874         int ret;
6875
6876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6877         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6878
6879         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6880         if (ret) {
6881                 dev_err(&hdev->pdev->dev,
6882                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6883                         ret);
6884                 return ret;
6885         }
6886
6887         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6888         retval = le16_to_cpu(desc.retval);
6889
6890         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6891 }
6892
6893 static int init_mgr_tbl(struct hclge_dev *hdev)
6894 {
6895         int ret;
6896         int i;
6897
6898         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6899                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6900                 if (ret) {
6901                         dev_err(&hdev->pdev->dev,
6902                                 "add mac ethertype failed, ret =%d.\n",
6903                                 ret);
6904                         return ret;
6905                 }
6906         }
6907
6908         return 0;
6909 }
6910
6911 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6912 {
6913         struct hclge_vport *vport = hclge_get_vport(handle);
6914         struct hclge_dev *hdev = vport->back;
6915
6916         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6917 }
6918
6919 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6920                               bool is_first)
6921 {
6922         const unsigned char *new_addr = (const unsigned char *)p;
6923         struct hclge_vport *vport = hclge_get_vport(handle);
6924         struct hclge_dev *hdev = vport->back;
6925         int ret;
6926
6927         /* mac addr check */
6928         if (is_zero_ether_addr(new_addr) ||
6929             is_broadcast_ether_addr(new_addr) ||
6930             is_multicast_ether_addr(new_addr)) {
6931                 dev_err(&hdev->pdev->dev,
6932                         "Change uc mac err! invalid mac:%pM.\n",
6933                          new_addr);
6934                 return -EINVAL;
6935         }
6936
6937         if ((!is_first || is_kdump_kernel()) &&
6938             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6939                 dev_warn(&hdev->pdev->dev,
6940                          "remove old uc mac address fail.\n");
6941
6942         ret = hclge_add_uc_addr(handle, new_addr);
6943         if (ret) {
6944                 dev_err(&hdev->pdev->dev,
6945                         "add uc mac address fail, ret =%d.\n",
6946                         ret);
6947
6948                 if (!is_first &&
6949                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6950                         dev_err(&hdev->pdev->dev,
6951                                 "restore uc mac address fail.\n");
6952
6953                 return -EIO;
6954         }
6955
6956         ret = hclge_pause_addr_cfg(hdev, new_addr);
6957         if (ret) {
6958                 dev_err(&hdev->pdev->dev,
6959                         "configure mac pause address fail, ret =%d.\n",
6960                         ret);
6961                 return -EIO;
6962         }
6963
6964         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6965
6966         return 0;
6967 }
6968
6969 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6970                           int cmd)
6971 {
6972         struct hclge_vport *vport = hclge_get_vport(handle);
6973         struct hclge_dev *hdev = vport->back;
6974
6975         if (!hdev->hw.mac.phydev)
6976                 return -EOPNOTSUPP;
6977
6978         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6979 }
6980
6981 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6982                                       u8 fe_type, bool filter_en, u8 vf_id)
6983 {
6984         struct hclge_vlan_filter_ctrl_cmd *req;
6985         struct hclge_desc desc;
6986         int ret;
6987
6988         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6989
6990         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6991         req->vlan_type = vlan_type;
6992         req->vlan_fe = filter_en ? fe_type : 0;
6993         req->vf_id = vf_id;
6994
6995         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6996         if (ret)
6997                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6998                         ret);
6999
7000         return ret;
7001 }
7002
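/* VLAN filter control as used below: HCLGE_FILTER_TYPE_VF selects the
 * per-function filter and HCLGE_FILTER_TYPE_PORT the port filter. Revision
 * 0x20 hardware only has the single V1 egress enable bit, while newer
 * revisions have separate NIC/RoCE ingress and egress enable bits that are
 * combined into HCLGE_FILTER_FE_EGRESS/INGRESS.
 */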
7003 #define HCLGE_FILTER_TYPE_VF            0
7004 #define HCLGE_FILTER_TYPE_PORT          1
7005 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7006 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7007 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7008 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7009 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7010 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7011                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7012 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7013                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7014
7015 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7016 {
7017         struct hclge_vport *vport = hclge_get_vport(handle);
7018         struct hclge_dev *hdev = vport->back;
7019
7020         if (hdev->pdev->revision >= 0x21) {
7021                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7022                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7023                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7024                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7025         } else {
7026                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7027                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7028                                            0);
7029         }
7030         if (enable)
7031                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7032         else
7033                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7034 }
7035
7036 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7037                                     bool is_kill, u16 vlan, u8 qos,
7038                                     __be16 proto)
7039 {
7040 #define HCLGE_MAX_VF_BYTES  16
7041         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7042         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7043         struct hclge_desc desc[2];
7044         u8 vf_byte_val;
7045         u8 vf_byte_off;
7046         int ret;
7047
7048         hclge_cmd_setup_basic_desc(&desc[0],
7049                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7050         hclge_cmd_setup_basic_desc(&desc[1],
7051                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7052
7053         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7054
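        /* The VF VLAN config command carries a bitmap with one bit per VF;
         * each descriptor holds HCLGE_MAX_VF_BYTES (16) bytes of it, so VFs
         * 0-127 land in desc[0] and higher VF ids in desc[1]. For example,
         * vfid 10 selects byte 1, bit 2 (vf_byte_val 0x04).
         */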
7055         vf_byte_off = vfid / 8;
7056         vf_byte_val = 1 << (vfid % 8);
7057
7058         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7059         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7060
7061         req0->vlan_id  = cpu_to_le16(vlan);
7062         req0->vlan_cfg = is_kill;
7063
7064         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7065                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7066         else
7067                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7068
7069         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7070         if (ret) {
7071                 dev_err(&hdev->pdev->dev,
7072                         "Send vf vlan command fail, ret =%d.\n",
7073                         ret);
7074                 return ret;
7075         }
7076
7077         if (!is_kill) {
7078 #define HCLGE_VF_VLAN_NO_ENTRY  2
7079                 if (!req0->resp_code || req0->resp_code == 1)
7080                         return 0;
7081
7082                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7083                         dev_warn(&hdev->pdev->dev,
7084                                  "vf vlan table is full, vf vlan filter is disabled\n");
7085                         return 0;
7086                 }
7087
7088                 dev_err(&hdev->pdev->dev,
7089                         "Add vf vlan filter fail, ret =%d.\n",
7090                         req0->resp_code);
7091         } else {
7092 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7093                 if (!req0->resp_code)
7094                         return 0;
7095
7096                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7097                         dev_warn(&hdev->pdev->dev,
7098                                  "vlan %d filter is not in vf vlan table\n",
7099                                  vlan);
7100                         return 0;
7101                 }
7102
7103                 dev_err(&hdev->pdev->dev,
7104                         "Kill vf vlan filter fail, ret =%d.\n",
7105                         req0->resp_code);
7106         }
7107
7108         return -EIO;
7109 }
7110
7111 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7112                                       u16 vlan_id, bool is_kill)
7113 {
7114         struct hclge_vlan_filter_pf_cfg_cmd *req;
7115         struct hclge_desc desc;
7116         u8 vlan_offset_byte_val;
7117         u8 vlan_offset_byte;
7118         u8 vlan_offset_160;
7119         int ret;
7120
7121         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7122
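        /* The PF VLAN filter command addresses VLAN ids in blocks of 160:
         * vlan_offset selects the 160-id block and the bitmap marks one id
         * inside it. For example, vlan_id 200 gives vlan_offset_160 = 1,
         * vlan_offset_byte = 5 and vlan_offset_byte_val = 0x01.
         */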
7123         vlan_offset_160 = vlan_id / 160;
7124         vlan_offset_byte = (vlan_id % 160) / 8;
7125         vlan_offset_byte_val = 1 << (vlan_id % 8);
7126
7127         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7128         req->vlan_offset = vlan_offset_160;
7129         req->vlan_cfg = is_kill;
7130         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7131
7132         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7133         if (ret)
7134                 dev_err(&hdev->pdev->dev,
7135                         "port vlan command, send fail, ret =%d.\n", ret);
7136         return ret;
7137 }
7138
7139 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7140                                     u16 vport_id, u16 vlan_id, u8 qos,
7141                                     bool is_kill)
7142 {
7143         u16 vport_idx, vport_num = 0;
7144         int ret;
7145
7146         if (is_kill && !vlan_id)
7147                 return 0;
7148
7149         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7150                                        0, proto);
7151         if (ret) {
7152                 dev_err(&hdev->pdev->dev,
7153                         "Set %d vport vlan filter config fail, ret =%d.\n",
7154                         vport_id, ret);
7155                 return ret;
7156         }
7157
7158         /* vlan 0 may be added twice when 8021q module is enabled */
7159         if (!is_kill && !vlan_id &&
7160             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7161                 return 0;
7162
7163         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7164                 dev_err(&hdev->pdev->dev,
7165                         "Add port vlan failed, vport %d is already in vlan %d\n",
7166                         vport_id, vlan_id);
7167                 return -EINVAL;
7168         }
7169
7170         if (is_kill &&
7171             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7172                 dev_err(&hdev->pdev->dev,
7173                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7174                         vport_id, vlan_id);
7175                 return -EINVAL;
7176         }
7177
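        /* hdev->vlan_table[] tracks which vports use each VLAN id, so the
         * port VLAN filter only needs to be touched when the first vport
         * joins a VLAN or the last one leaves it.
         */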
7178         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7179                 vport_num++;
7180
7181         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7182                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7183                                                  is_kill);
7184
7185         return ret;
7186 }
7187
7188 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7189 {
7190         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7191         struct hclge_vport_vtag_tx_cfg_cmd *req;
7192         struct hclge_dev *hdev = vport->back;
7193         struct hclge_desc desc;
7194         int status;
7195
7196         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7197
7198         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7199         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7200         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7201         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7202                       vcfg->accept_tag1 ? 1 : 0);
7203         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7204                       vcfg->accept_untag1 ? 1 : 0);
7205         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7206                       vcfg->accept_tag2 ? 1 : 0);
7207         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7208                       vcfg->accept_untag2 ? 1 : 0);
7209         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7210                       vcfg->insert_tag1_en ? 1 : 0);
7211         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7212                       vcfg->insert_tag2_en ? 1 : 0);
7213         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7214
7215         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7216         req->vf_bitmap[req->vf_offset] =
7217                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7218
7219         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7220         if (status)
7221                 dev_err(&hdev->pdev->dev,
7222                         "Send port txvlan cfg command fail, ret =%d\n",
7223                         status);
7224
7225         return status;
7226 }
7227
7228 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7229 {
7230         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7231         struct hclge_vport_vtag_rx_cfg_cmd *req;
7232         struct hclge_dev *hdev = vport->back;
7233         struct hclge_desc desc;
7234         int status;
7235
7236         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7237
7238         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7239         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7240                       vcfg->strip_tag1_en ? 1 : 0);
7241         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7242                       vcfg->strip_tag2_en ? 1 : 0);
7243         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7244                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7245         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7246                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7247
7248         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7249         req->vf_bitmap[req->vf_offset] =
7250                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7251
7252         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7253         if (status)
7254                 dev_err(&hdev->pdev->dev,
7255                         "Send port rxvlan cfg command fail, ret =%d\n",
7256                         status);
7257
7258         return status;
7259 }
7260
7261 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7262                                   u16 port_base_vlan_state,
7263                                   u16 vlan_tag)
7264 {
7265         int ret;
7266
7267         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7268                 vport->txvlan_cfg.accept_tag1 = true;
7269                 vport->txvlan_cfg.insert_tag1_en = false;
7270                 vport->txvlan_cfg.default_tag1 = 0;
7271         } else {
7272                 vport->txvlan_cfg.accept_tag1 = false;
7273                 vport->txvlan_cfg.insert_tag1_en = true;
7274                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7275         }
7276
7277         vport->txvlan_cfg.accept_untag1 = true;
7278
7279         /* accept_tag2 and accept_untag2 are not supported on
7280          * pdev revision(0x20); newer revisions support them, but
7281          * these two fields cannot be configured by the user.
7282          */
7283         vport->txvlan_cfg.accept_tag2 = true;
7284         vport->txvlan_cfg.accept_untag2 = true;
7285         vport->txvlan_cfg.insert_tag2_en = false;
7286         vport->txvlan_cfg.default_tag2 = 0;
7287
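        /* Rx stripping: with port based VLAN disabled, only tag2 stripping
         * follows the user's rx offload setting; with it enabled, tag2 is
         * always stripped and the offload setting is applied to tag1
         * instead (see also hclge_en_hw_strip_rxvtag()).
         */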
7288         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7289                 vport->rxvlan_cfg.strip_tag1_en = false;
7290                 vport->rxvlan_cfg.strip_tag2_en =
7291                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7292         } else {
7293                 vport->rxvlan_cfg.strip_tag1_en =
7294                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7295                 vport->rxvlan_cfg.strip_tag2_en = true;
7296         }
7297         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7298         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7299
7300         ret = hclge_set_vlan_tx_offload_cfg(vport);
7301         if (ret)
7302                 return ret;
7303
7304         return hclge_set_vlan_rx_offload_cfg(vport);
7305 }
7306
7307 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7308 {
7309         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7310         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7311         struct hclge_desc desc;
7312         int status;
7313
7314         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7315         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7316         rx_req->ot_fst_vlan_type =
7317                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7318         rx_req->ot_sec_vlan_type =
7319                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7320         rx_req->in_fst_vlan_type =
7321                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7322         rx_req->in_sec_vlan_type =
7323                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7324
7325         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7326         if (status) {
7327                 dev_err(&hdev->pdev->dev,
7328                         "Send rxvlan protocol type command fail, ret =%d\n",
7329                         status);
7330                 return status;
7331         }
7332
7333         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7334
7335         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7336         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7337         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7338
7339         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7340         if (status)
7341                 dev_err(&hdev->pdev->dev,
7342                         "Send txvlan protocol type command fail, ret =%d\n",
7343                         status);
7344
7345         return status;
7346 }
7347
7348 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7349 {
7350 #define HCLGE_DEF_VLAN_TYPE             0x8100
7351
7352         struct hnae3_handle *handle = &hdev->vport[0].nic;
7353         struct hclge_vport *vport;
7354         int ret;
7355         int i;
7356
7357         if (hdev->pdev->revision >= 0x21) {
7358                 /* for revision 0x21, vf vlan filter is per function */
7359                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7360                         vport = &hdev->vport[i];
7361                         ret = hclge_set_vlan_filter_ctrl(hdev,
7362                                                          HCLGE_FILTER_TYPE_VF,
7363                                                          HCLGE_FILTER_FE_EGRESS,
7364                                                          true,
7365                                                          vport->vport_id);
7366                         if (ret)
7367                                 return ret;
7368                 }
7369
7370                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7371                                                  HCLGE_FILTER_FE_INGRESS, true,
7372                                                  0);
7373                 if (ret)
7374                         return ret;
7375         } else {
7376                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7377                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7378                                                  true, 0);
7379                 if (ret)
7380                         return ret;
7381         }
7382
7383         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7384
7385         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7386         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7387         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7388         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7389         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7390         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7391
7392         ret = hclge_set_vlan_protocol_type(hdev);
7393         if (ret)
7394                 return ret;
7395
7396         for (i = 0; i < hdev->num_alloc_vport; i++) {
7397                 u16 vlan_tag;
7398
7399                 vport = &hdev->vport[i];
7400                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7401
7402                 ret = hclge_vlan_offload_cfg(vport,
7403                                              vport->port_base_vlan_cfg.state,
7404                                              vlan_tag);
7405                 if (ret)
7406                         return ret;
7407         }
7408
7409         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7410 }
7411
7412 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7413                                        bool writen_to_tbl)
7414 {
7415         struct hclge_vport_vlan_cfg *vlan;
7416
7417         /* vlan 0 is reserved */
7418         if (!vlan_id)
7419                 return;
7420
7421         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7422         if (!vlan)
7423                 return;
7424
7425         vlan->hd_tbl_status = writen_to_tbl;
7426         vlan->vlan_id = vlan_id;
7427
7428         list_add_tail(&vlan->node, &vport->vlan_list);
7429 }
7430
7431 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7432 {
7433         struct hclge_vport_vlan_cfg *vlan, *tmp;
7434         struct hclge_dev *hdev = vport->back;
7435         int ret;
7436
7437         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7438                 if (!vlan->hd_tbl_status) {
7439                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7440                                                        vport->vport_id,
7441                                                        vlan->vlan_id, 0, false);
7442                         if (ret) {
7443                                 dev_err(&hdev->pdev->dev,
7444                                         "restore vport vlan list failed, ret=%d\n",
7445                                         ret);
7446                                 return ret;
7447                         }
7448                 }
7449                 vlan->hd_tbl_status = true;
7450         }
7451
7452         return 0;
7453 }
7454
7455 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7456                                       bool is_write_tbl)
7457 {
7458         struct hclge_vport_vlan_cfg *vlan, *tmp;
7459         struct hclge_dev *hdev = vport->back;
7460
7461         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7462                 if (vlan->vlan_id == vlan_id) {
7463                         if (is_write_tbl && vlan->hd_tbl_status)
7464                                 hclge_set_vlan_filter_hw(hdev,
7465                                                          htons(ETH_P_8021Q),
7466                                                          vport->vport_id,
7467                                                          vlan_id, 0,
7468                                                          true);
7469
7470                         list_del(&vlan->node);
7471                         kfree(vlan);
7472                         break;
7473                 }
7474         }
7475 }
7476
7477 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7478 {
7479         struct hclge_vport_vlan_cfg *vlan, *tmp;
7480         struct hclge_dev *hdev = vport->back;
7481
7482         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7483                 if (vlan->hd_tbl_status)
7484                         hclge_set_vlan_filter_hw(hdev,
7485                                                  htons(ETH_P_8021Q),
7486                                                  vport->vport_id,
7487                                                  vlan->vlan_id, 0,
7488                                                  true);
7489
7490                 vlan->hd_tbl_status = false;
7491                 if (is_del_list) {
7492                         list_del(&vlan->node);
7493                         kfree(vlan);
7494                 }
7495         }
7496 }
7497
7498 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7499 {
7500         struct hclge_vport_vlan_cfg *vlan, *tmp;
7501         struct hclge_vport *vport;
7502         int i;
7503
7504         mutex_lock(&hdev->vport_cfg_mutex);
7505         for (i = 0; i < hdev->num_alloc_vport; i++) {
7506                 vport = &hdev->vport[i];
7507                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7508                         list_del(&vlan->node);
7509                         kfree(vlan);
7510                 }
7511         }
7512         mutex_unlock(&hdev->vport_cfg_mutex);
7513 }
7514
7515 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7516 {
7517         struct hclge_vport *vport = hclge_get_vport(handle);
7518
7519         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7520                 vport->rxvlan_cfg.strip_tag1_en = false;
7521                 vport->rxvlan_cfg.strip_tag2_en = enable;
7522         } else {
7523                 vport->rxvlan_cfg.strip_tag1_en = enable;
7524                 vport->rxvlan_cfg.strip_tag2_en = true;
7525         }
7526         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7527         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7528         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7529
7530         return hclge_set_vlan_rx_offload_cfg(vport);
7531 }
7532
7533 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7534                                             u16 port_base_vlan_state,
7535                                             struct hclge_vlan_info *new_info,
7536                                             struct hclge_vlan_info *old_info)
7537 {
7538         struct hclge_dev *hdev = vport->back;
7539         int ret;
7540
7541         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7542                 hclge_rm_vport_all_vlan_table(vport, false);
7543                 return hclge_set_vlan_filter_hw(hdev,
7544                                                  htons(new_info->vlan_proto),
7545                                                  vport->vport_id,
7546                                                  new_info->vlan_tag,
7547                                                  new_info->qos, false);
7548         }
7549
7550         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7551                                        vport->vport_id, old_info->vlan_tag,
7552                                        old_info->qos, true);
7553         if (ret)
7554                 return ret;
7555
7556         return hclge_add_vport_all_vlan_table(vport);
7557 }
7558
7559 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7560                                     struct hclge_vlan_info *vlan_info)
7561 {
7562         struct hnae3_handle *nic = &vport->nic;
7563         struct hclge_vlan_info *old_vlan_info;
7564         struct hclge_dev *hdev = vport->back;
7565         int ret;
7566
7567         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7568
7569         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7570         if (ret)
7571                 return ret;
7572
7573         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7574                 /* add new VLAN tag */
7575                 ret = hclge_set_vlan_filter_hw(hdev,
7576                                                htons(vlan_info->vlan_proto),
7577                                                vport->vport_id,
7578                                                vlan_info->vlan_tag,
7579                                                vlan_info->qos, false);
7580                 if (ret)
7581                         return ret;
7582
7583                 /* remove old VLAN tag */
7584                 ret = hclge_set_vlan_filter_hw(hdev,
7585                                                htons(old_vlan_info->vlan_proto),
7586                                                vport->vport_id,
7587                                                old_vlan_info->vlan_tag,
7588                                                old_vlan_info->qos, true);
7589                 if (ret)
7590                         return ret;
7591
7592                 goto update;
7593         }
7594
7595         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7596                                                old_vlan_info);
7597         if (ret)
7598                 return ret;
7599
7600         /* update state only when disabling/enabling port based VLAN */
7601         vport->port_base_vlan_cfg.state = state;
7602         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7603                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7604         else
7605                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7606
7607 update:
7608         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7609         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7610         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7611
7612         return 0;
7613 }
7614
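/* Decide how the port based VLAN state changes for a requested VLAN:
 * currently disabled:  vlan 0 -> NOCHANGE, non-zero vlan -> ENABLE
 * currently enabled:   vlan 0 -> DISABLE, same vlan -> NOCHANGE,
 *                      different vlan -> MODIFY
 */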
7615 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7616                                           enum hnae3_port_base_vlan_state state,
7617                                           u16 vlan)
7618 {
7619         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7620                 if (!vlan)
7621                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7622                 else
7623                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7624         } else {
7625                 if (!vlan)
7626                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7627                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7628                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7629                 else
7630                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7631         }
7632 }
7633
7634 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7635                                     u16 vlan, u8 qos, __be16 proto)
7636 {
7637         struct hclge_vport *vport = hclge_get_vport(handle);
7638         struct hclge_dev *hdev = vport->back;
7639         struct hclge_vlan_info vlan_info;
7640         u16 state;
7641         int ret;
7642
7643         if (hdev->pdev->revision == 0x20)
7644                 return -EOPNOTSUPP;
7645
7646         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7647         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7648                 return -EINVAL;
7649         if (proto != htons(ETH_P_8021Q))
7650                 return -EPROTONOSUPPORT;
7651
7652         vport = &hdev->vport[vfid];
7653         state = hclge_get_port_base_vlan_state(vport,
7654                                                vport->port_base_vlan_cfg.state,
7655                                                vlan);
7656         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7657                 return 0;
7658
7659         vlan_info.vlan_tag = vlan;
7660         vlan_info.qos = qos;
7661         vlan_info.vlan_proto = ntohs(proto);
7662
7663         /* update port based VLAN for PF */
7664         if (!vfid) {
7665                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7666                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7667                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7668
7669                 return ret;
7670         }
7671
7672         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7673                 return hclge_update_port_base_vlan_cfg(vport, state,
7674                                                        &vlan_info);
7675         } else {
7676                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7677                                                         (u8)vfid, state,
7678                                                         vlan, qos,
7679                                                         ntohs(proto));
7680                 return ret;
7681         }
7682 }
7683
7684 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7685                           u16 vlan_id, bool is_kill)
7686 {
7687         struct hclge_vport *vport = hclge_get_vport(handle);
7688         struct hclge_dev *hdev = vport->back;
7689         bool writen_to_tbl = false;
7690         int ret = 0;
7691
7692         /* When port based VLAN is enabled, we use it as the VLAN filter
7693          * entry. In this case, we don't update the VLAN filter table when
7694          * the user adds or removes a VLAN; we just update the vport VLAN
7695          * list. The VLAN ids in the list won't be written to the VLAN
7696          * filter table until port based VLAN is disabled.
7697          */
7698         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7699                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7700                                                vlan_id, 0, is_kill);
7701                 writen_to_tbl = true;
7702         }
7703
7704         if (ret)
7705                 return ret;
7706
7707         if (is_kill)
7708                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7709         else
7710                 hclge_add_vport_vlan_table(vport, vlan_id,
7711                                            writen_to_tbl);
7712
7713         return 0;
7714 }
7715
7716 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7717 {
7718         struct hclge_config_max_frm_size_cmd *req;
7719         struct hclge_desc desc;
7720
7721         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7722
7723         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7724         req->max_frm_size = cpu_to_le16(new_mps);
7725         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7726
7727         return hclge_cmd_send(&hdev->hw, &desc, 1);
7728 }
7729
7730 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7731 {
7732         struct hclge_vport *vport = hclge_get_vport(handle);
7733
7734         return hclge_set_vport_mtu(vport, new_mtu);
7735 }
7736
7737 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7738 {
7739         struct hclge_dev *hdev = vport->back;
7740         int i, max_frm_size, ret = 0;
7741
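        /* Convert the MTU to a max frame size including the Ethernet header,
         * FCS and room for two VLAN tags, e.g. an MTU of 1500 gives
         * 1500 + 14 + 4 + 2 * 4 = 1526 bytes.
         */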
7742         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7743         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7744             max_frm_size > HCLGE_MAC_MAX_FRAME)
7745                 return -EINVAL;
7746
7747         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7748         mutex_lock(&hdev->vport_lock);
7749         /* VF's mps must fit within hdev->mps */
7750         if (vport->vport_id && max_frm_size > hdev->mps) {
7751                 mutex_unlock(&hdev->vport_lock);
7752                 return -EINVAL;
7753         } else if (vport->vport_id) {
7754                 vport->mps = max_frm_size;
7755                 mutex_unlock(&hdev->vport_lock);
7756                 return 0;
7757         }
7758
7759         /* PF's mps must be greater than VF's mps */
7760         for (i = 1; i < hdev->num_alloc_vport; i++)
7761                 if (max_frm_size < hdev->vport[i].mps) {
7762                         mutex_unlock(&hdev->vport_lock);
7763                         return -EINVAL;
7764                 }
7765
7766         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7767
7768         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7769         if (ret) {
7770                 dev_err(&hdev->pdev->dev,
7771                         "Change mtu fail, ret =%d\n", ret);
7772                 goto out;
7773         }
7774
7775         hdev->mps = max_frm_size;
7776         vport->mps = max_frm_size;
7777
7778         ret = hclge_buffer_alloc(hdev);
7779         if (ret)
7780                 dev_err(&hdev->pdev->dev,
7781                         "Allocate buffer fail, ret =%d\n", ret);
7782
7783 out:
7784         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7785         mutex_unlock(&hdev->vport_lock);
7786         return ret;
7787 }
7788
7789 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7790                                     bool enable)
7791 {
7792         struct hclge_reset_tqp_queue_cmd *req;
7793         struct hclge_desc desc;
7794         int ret;
7795
7796         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7797
7798         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7799         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7800         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7801
7802         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7803         if (ret) {
7804                 dev_err(&hdev->pdev->dev,
7805                         "Send tqp reset cmd error, status =%d\n", ret);
7806                 return ret;
7807         }
7808
7809         return 0;
7810 }
7811
7812 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7813 {
7814         struct hclge_reset_tqp_queue_cmd *req;
7815         struct hclge_desc desc;
7816         int ret;
7817
7818         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7819
7820         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7821         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7822
7823         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7824         if (ret) {
7825                 dev_err(&hdev->pdev->dev,
7826                         "Get reset status error, status =%d\n", ret);
7827                 return ret;
7828         }
7829
7830         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7831 }
7832
7833 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7834 {
7835         struct hnae3_queue *queue;
7836         struct hclge_tqp *tqp;
7837
7838         queue = handle->kinfo.tqp[queue_id];
7839         tqp = container_of(queue, struct hclge_tqp, q);
7840
7841         return tqp->index;
7842 }
7843
7844 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7845 {
7846         struct hclge_vport *vport = hclge_get_vport(handle);
7847         struct hclge_dev *hdev = vport->back;
7848         int reset_try_times = 0;
7849         int reset_status;
7850         u16 queue_gid;
7851         int ret = 0;
7852
7853         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7854
7855         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7856         if (ret) {
7857                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7858                 return ret;
7859         }
7860
7861         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7862         if (ret) {
7863                 dev_err(&hdev->pdev->dev,
7864                         "Send reset tqp cmd fail, ret = %d\n", ret);
7865                 return ret;
7866         }
7867
7868         reset_try_times = 0;
7869         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7870                 /* Wait for tqp hw reset */
7871                 msleep(20);
7872                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7873                 if (reset_status)
7874                         break;
7875         }
7876
7877         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7878                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7879                 return -ETIME;
7880         }
7881
7882         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7883         if (ret)
7884                 dev_err(&hdev->pdev->dev,
7885                         "Deassert the soft reset fail, ret = %d\n", ret);
7886
7887         return ret;
7888 }
7889
7890 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7891 {
7892         struct hclge_dev *hdev = vport->back;
7893         int reset_try_times = 0;
7894         int reset_status;
7895         u16 queue_gid;
7896         int ret;
7897
7898         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7899
7900         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7901         if (ret) {
7902                 dev_warn(&hdev->pdev->dev,
7903                          "Send reset tqp cmd fail, ret = %d\n", ret);
7904                 return;
7905         }
7906
7907         reset_try_times = 0;
7908         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7909                 /* Wait for tqp hw reset */
7910                 msleep(20);
7911                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7912                 if (reset_status)
7913                         break;
7914         }
7915
7916         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7917                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7918                 return;
7919         }
7920
7921         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7922         if (ret)
7923                 dev_warn(&hdev->pdev->dev,
7924                          "Deassert the soft reset fail, ret = %d\n", ret);
7925 }
7926
7927 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7928 {
7929         struct hclge_vport *vport = hclge_get_vport(handle);
7930         struct hclge_dev *hdev = vport->back;
7931
7932         return hdev->fw_version;
7933 }
7934
7935 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7936 {
7937         struct phy_device *phydev = hdev->hw.mac.phydev;
7938
7939         if (!phydev)
7940                 return;
7941
7942         phy_set_asym_pause(phydev, rx_en, tx_en);
7943 }
7944
7945 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7946 {
7947         int ret;
7948
7949         if (rx_en && tx_en)
7950                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7951         else if (rx_en && !tx_en)
7952                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7953         else if (!rx_en && tx_en)
7954                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7955         else
7956                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7957
7958         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7959                 return 0;
7960
7961         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7962         if (ret) {
7963                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7964                         ret);
7965                 return ret;
7966         }
7967
7968         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7969
7970         return 0;
7971 }
7972
7973 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7974 {
7975         struct phy_device *phydev = hdev->hw.mac.phydev;
7976         u16 remote_advertising = 0;
7977         u16 local_advertising = 0;
7978         u32 rx_pause, tx_pause;
7979         u8 flowctl;
7980
7981         if (!phydev->link || !phydev->autoneg)
7982                 return 0;
7983
7984         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7985
7986         if (phydev->pause)
7987                 remote_advertising = LPA_PAUSE_CAP;
7988
7989         if (phydev->asym_pause)
7990                 remote_advertising |= LPA_PAUSE_ASYM;
7991
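        /* Resolve the pause configuration from the local and link partner
         * advertisements (IEEE 802.3 flow control autoneg resolution).
         */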
7992         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7993                                            remote_advertising);
7994         tx_pause = flowctl & FLOW_CTRL_TX;
7995         rx_pause = flowctl & FLOW_CTRL_RX;
7996
7997         if (phydev->duplex == HCLGE_MAC_HALF) {
7998                 tx_pause = 0;
7999                 rx_pause = 0;
8000         }
8001
8002         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8003 }
8004
8005 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8006                                  u32 *rx_en, u32 *tx_en)
8007 {
8008         struct hclge_vport *vport = hclge_get_vport(handle);
8009         struct hclge_dev *hdev = vport->back;
8010
8011         *auto_neg = hclge_get_autoneg(handle);
8012
8013         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8014                 *rx_en = 0;
8015                 *tx_en = 0;
8016                 return;
8017         }
8018
8019         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8020                 *rx_en = 1;
8021                 *tx_en = 0;
8022         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8023                 *tx_en = 1;
8024                 *rx_en = 0;
8025         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8026                 *rx_en = 1;
8027                 *tx_en = 1;
8028         } else {
8029                 *rx_en = 0;
8030                 *tx_en = 0;
8031         }
8032 }
8033
8034 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8035                                 u32 rx_en, u32 tx_en)
8036 {
8037         struct hclge_vport *vport = hclge_get_vport(handle);
8038         struct hclge_dev *hdev = vport->back;
8039         struct phy_device *phydev = hdev->hw.mac.phydev;
8040         u32 fc_autoneg;
8041
8042         fc_autoneg = hclge_get_autoneg(handle);
8043         if (auto_neg != fc_autoneg) {
8044                 dev_info(&hdev->pdev->dev,
8045                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8046                 return -EOPNOTSUPP;
8047         }
8048
8049         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8050                 dev_info(&hdev->pdev->dev,
8051                          "Priority flow control enabled. Cannot set link flow control.\n");
8052                 return -EOPNOTSUPP;
8053         }
8054
8055         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8056
8057         if (!fc_autoneg)
8058                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8059
8060         if (phydev)
8061                 return phy_start_aneg(phydev);
8062
8063         if (hdev->pdev->revision == 0x20)
8064                 return -EOPNOTSUPP;
8065
8066         return hclge_restart_autoneg(handle);
8067 }
8068
8069 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8070                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8071 {
8072         struct hclge_vport *vport = hclge_get_vport(handle);
8073         struct hclge_dev *hdev = vport->back;
8074
8075         if (speed)
8076                 *speed = hdev->hw.mac.speed;
8077         if (duplex)
8078                 *duplex = hdev->hw.mac.duplex;
8079         if (auto_neg)
8080                 *auto_neg = hdev->hw.mac.autoneg;
8081 }
8082
8083 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8084                                  u8 *module_type)
8085 {
8086         struct hclge_vport *vport = hclge_get_vport(handle);
8087         struct hclge_dev *hdev = vport->back;
8088
8089         if (media_type)
8090                 *media_type = hdev->hw.mac.media_type;
8091
8092         if (module_type)
8093                 *module_type = hdev->hw.mac.module_type;
8094 }
8095
8096 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8097                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8098 {
8099         struct hclge_vport *vport = hclge_get_vport(handle);
8100         struct hclge_dev *hdev = vport->back;
8101         struct phy_device *phydev = hdev->hw.mac.phydev;
8102         int mdix_ctrl, mdix, retval, is_resolved;
8103
8104         if (!phydev) {
8105                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8106                 *tp_mdix = ETH_TP_MDI_INVALID;
8107                 return;
8108         }
8109
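        /* Switch the PHY to the MDI/MDI-X page, read the control and status
         * registers, then restore the copper page before decoding the mode.
         */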
8110         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8111
8112         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8113         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8114                                     HCLGE_PHY_MDIX_CTRL_S);
8115
8116         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8117         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8118         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8119
8120         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8121
8122         switch (mdix_ctrl) {
8123         case 0x0:
8124                 *tp_mdix_ctrl = ETH_TP_MDI;
8125                 break;
8126         case 0x1:
8127                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8128                 break;
8129         case 0x3:
8130                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8131                 break;
8132         default:
8133                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8134                 break;
8135         }
8136
8137         if (!is_resolved)
8138                 *tp_mdix = ETH_TP_MDI_INVALID;
8139         else if (mdix)
8140                 *tp_mdix = ETH_TP_MDI_X;
8141         else
8142                 *tp_mdix = ETH_TP_MDI;
8143 }
8144
8145 static void hclge_info_show(struct hclge_dev *hdev)
8146 {
8147         struct device *dev = &hdev->pdev->dev;
8148
8149         dev_info(dev, "PF info begin:\n");
8150
8151         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
8152         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8153         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8154         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
8155         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
8156         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
8157         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8158         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8159         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8160         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8161         dev_info(dev, "This is %s PF\n",
8162                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8163         dev_info(dev, "DCB %s\n",
8164                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8165         dev_info(dev, "MQPRIO %s\n",
8166                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8167
8168         dev_info(dev, "PF info end.\n");
8169 }
8170
8171 static int hclge_init_client_instance(struct hnae3_client *client,
8172                                       struct hnae3_ae_dev *ae_dev)
8173 {
8174         struct hclge_dev *hdev = ae_dev->priv;
8175         struct hclge_vport *vport;
8176         int i, ret;
8177
8178         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8179                 vport = &hdev->vport[i];
8180
8181                 switch (client->type) {
8182                 case HNAE3_CLIENT_KNIC:
8183
8184                         hdev->nic_client = client;
8185                         vport->nic.client = client;
8186                         ret = client->ops->init_instance(&vport->nic);
8187                         if (ret)
8188                                 goto clear_nic;
8189
8190                         hnae3_set_client_init_flag(client, ae_dev, 1);
8191                         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8192
8193                         if (netif_msg_drv(&hdev->vport->nic))
8194                                 hclge_info_show(hdev);
8195
8196                         if (hdev->roce_client &&
8197                             hnae3_dev_roce_supported(hdev)) {
8198                                 struct hnae3_client *rc = hdev->roce_client;
8199
8200                                 ret = hclge_init_roce_base_info(vport);
8201                                 if (ret)
8202                                         goto clear_roce;
8203
8204                                 ret = rc->ops->init_instance(&vport->roce);
8205                                 if (ret)
8206                                         goto clear_roce;
8207
8208                                 hnae3_set_client_init_flag(hdev->roce_client,
8209                                                            ae_dev, 1);
8210                         }
8211
8212                         break;
8213                 case HNAE3_CLIENT_UNIC:
8214                         hdev->nic_client = client;
8215                         vport->nic.client = client;
8216
8217                         ret = client->ops->init_instance(&vport->nic);
8218                         if (ret)
8219                                 goto clear_nic;
8220
8221                         hnae3_set_client_init_flag(client, ae_dev, 1);
8222
8223                         break;
8224                 case HNAE3_CLIENT_ROCE:
8225                         if (hnae3_dev_roce_supported(hdev)) {
8226                                 hdev->roce_client = client;
8227                                 vport->roce.client = client;
8228                         }
8229
8230                         if (hdev->roce_client && hdev->nic_client) {
8231                                 ret = hclge_init_roce_base_info(vport);
8232                                 if (ret)
8233                                         goto clear_roce;
8234
8235                                 ret = client->ops->init_instance(&vport->roce);
8236                                 if (ret)
8237                                         goto clear_roce;
8238
8239                                 hnae3_set_client_init_flag(client, ae_dev, 1);
8240                         }
8241
8242                         break;
8243                 default:
8244                         return -EINVAL;
8245                 }
8246         }
8247
8248         return 0;
8249
8250 clear_nic:
8251         hdev->nic_client = NULL;
8252         vport->nic.client = NULL;
8253         return ret;
8254 clear_roce:
8255         hdev->roce_client = NULL;
8256         vport->roce.client = NULL;
8257         return ret;
8258 }
8259
8260 static void hclge_uninit_client_instance(struct hnae3_client *client,
8261                                          struct hnae3_ae_dev *ae_dev)
8262 {
8263         struct hclge_dev *hdev = ae_dev->priv;
8264         struct hclge_vport *vport;
8265         int i;
8266
8267         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8268                 vport = &hdev->vport[i];
8269                 if (hdev->roce_client) {
8270                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8271                                                                 0);
8272                         hdev->roce_client = NULL;
8273                         vport->roce.client = NULL;
8274                 }
8275                 if (client->type == HNAE3_CLIENT_ROCE)
8276                         return;
8277                 if (hdev->nic_client && client->ops->uninit_instance) {
8278                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8279                         client->ops->uninit_instance(&vport->nic, 0);
8280                         hdev->nic_client = NULL;
8281                         vport->nic.client = NULL;
8282                 }
8283         }
8284 }
8285
8286 static int hclge_pci_init(struct hclge_dev *hdev)
8287 {
8288         struct pci_dev *pdev = hdev->pdev;
8289         struct hclge_hw *hw;
8290         int ret;
8291
8292         ret = pci_enable_device(pdev);
8293         if (ret) {
8294                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8295                 return ret;
8296         }
8297
8298         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8299         if (ret) {
8300                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8301                 if (ret) {
8302                         dev_err(&pdev->dev,
8303                                 "can't set consistent PCI DMA\n");
8304                         goto err_disable_device;
8305                 }
8306                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8307         }
8308
8309         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8310         if (ret) {
8311                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8312                 goto err_disable_device;
8313         }
8314
8315         pci_set_master(pdev);
8316         hw = &hdev->hw;
8317         hw->io_base = pcim_iomap(pdev, 2, 0);
8318         if (!hw->io_base) {
8319                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8320                 ret = -ENOMEM;
8321                 goto err_clr_master;
8322         }
8323
8324         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8325
8326         return 0;
8327 err_clr_master:
8328         pci_clear_master(pdev);
8329         pci_release_regions(pdev);
8330 err_disable_device:
8331         pci_disable_device(pdev);
8332
8333         return ret;
8334 }
8335
8336 static void hclge_pci_uninit(struct hclge_dev *hdev)
8337 {
8338         struct pci_dev *pdev = hdev->pdev;
8339
8340         pcim_iounmap(pdev, hdev->hw.io_base);
8341         pci_free_irq_vectors(pdev);
8342         pci_clear_master(pdev);
8343         pci_release_mem_regions(pdev);
8344         pci_disable_device(pdev);
8345 }
8346
8347 static void hclge_state_init(struct hclge_dev *hdev)
8348 {
8349         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8350         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8351         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8352         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8353         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8354         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8355 }
8356
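/* Quiesce deferred work before tear-down: mark the device DOWN, then
 * flush the service/reset timers and the service, reset and mailbox
 * work items.
 */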
8357 static void hclge_state_uninit(struct hclge_dev *hdev)
8358 {
8359         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8360
8361         if (hdev->service_timer.function)
8362                 del_timer_sync(&hdev->service_timer);
8363         if (hdev->reset_timer.function)
8364                 del_timer_sync(&hdev->reset_timer);
8365         if (hdev->service_task.func)
8366                 cancel_work_sync(&hdev->service_task);
8367         if (hdev->rst_service_task.func)
8368                 cancel_work_sync(&hdev->rst_service_task);
8369         if (hdev->mbx_service_task.func)
8370                 cancel_work_sync(&hdev->mbx_service_task);
8371 }
8372
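/* Prepare for an FLR: request a function-level reset through the normal
 * reset path and poll for the reset task to bring the function down,
 * waiting at most HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds.
 */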
8373 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8374 {
8375 #define HCLGE_FLR_WAIT_MS       100
8376 #define HCLGE_FLR_WAIT_CNT      50
8377         struct hclge_dev *hdev = ae_dev->priv;
8378         int cnt = 0;
8379
8380         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8381         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8382         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8383         hclge_reset_event(hdev->pdev, NULL);
8384
8385         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8386                cnt++ < HCLGE_FLR_WAIT_CNT)
8387                 msleep(HCLGE_FLR_WAIT_MS);
8388
8389         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8390                 dev_err(&hdev->pdev->dev,
8391                         "flr wait down timeout: %d\n", cnt);
8392 }
8393
8394 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8395 {
8396         struct hclge_dev *hdev = ae_dev->priv;
8397
8398         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8399 }
8400
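/* PF probe path: allocate the hclge_dev, bring up PCI and the firmware
 * command queue, then initialize MSI-X, TQPs, vports, MAC, VLAN, TM, RSS
 * and flow-director state before enabling the misc (vector0) interrupt.
 */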
8401 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8402 {
8403         struct pci_dev *pdev = ae_dev->pdev;
8404         struct hclge_dev *hdev;
8405         int ret;
8406
8407         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8408         if (!hdev) {
8409                 ret = -ENOMEM;
8410                 goto out;
8411         }
8412
8413         hdev->pdev = pdev;
8414         hdev->ae_dev = ae_dev;
8415         hdev->reset_type = HNAE3_NONE_RESET;
8416         hdev->reset_level = HNAE3_FUNC_RESET;
8417         ae_dev->priv = hdev;
8418         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8419
8420         mutex_init(&hdev->vport_lock);
8421         mutex_init(&hdev->vport_cfg_mutex);
8422         spin_lock_init(&hdev->fd_rule_lock);
8423
8424         ret = hclge_pci_init(hdev);
8425         if (ret) {
8426                 dev_err(&pdev->dev, "PCI init failed\n");
8427                 goto out;
8428         }
8429
8430         /* Initialize the firmware command queue */
8431         ret = hclge_cmd_queue_init(hdev);
8432         if (ret) {
8433                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8434                 goto err_pci_uninit;
8435         }
8436
8437         /* Initialize the firmware command interface */
8438         ret = hclge_cmd_init(hdev);
8439         if (ret)
8440                 goto err_cmd_uninit;
8441
8442         ret = hclge_get_cap(hdev);
8443         if (ret) {
8444                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8445                         ret);
8446                 goto err_cmd_uninit;
8447         }
8448
8449         ret = hclge_configure(hdev);
8450         if (ret) {
8451                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8452                 goto err_cmd_uninit;
8453         }
8454
8455         ret = hclge_init_msi(hdev);
8456         if (ret) {
8457                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8458                 goto err_cmd_uninit;
8459         }
8460
8461         ret = hclge_misc_irq_init(hdev);
8462         if (ret) {
8463                 dev_err(&pdev->dev,
8464                         "Misc IRQ(vector0) init error, ret = %d.\n",
8465                         ret);
8466                 goto err_msi_uninit;
8467         }
8468
8469         ret = hclge_alloc_tqps(hdev);
8470         if (ret) {
8471                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8472                 goto err_msi_irq_uninit;
8473         }
8474
8475         ret = hclge_alloc_vport(hdev);
8476         if (ret) {
8477                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8478                 goto err_msi_irq_uninit;
8479         }
8480
8481         ret = hclge_map_tqp(hdev);
8482         if (ret) {
8483                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8484                 goto err_msi_irq_uninit;
8485         }
8486
8487         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8488                 ret = hclge_mac_mdio_config(hdev);
8489                 if (ret) {
8490                         dev_err(&hdev->pdev->dev,
8491                                 "mdio config fail ret=%d\n", ret);
8492                         goto err_msi_irq_uninit;
8493                 }
8494         }
8495
8496         ret = hclge_init_umv_space(hdev);
8497         if (ret) {
8498                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8499                 goto err_mdiobus_unreg;
8500         }
8501
8502         ret = hclge_mac_init(hdev);
8503         if (ret) {
8504                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8505                 goto err_mdiobus_unreg;
8506         }
8507
8508         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8509         if (ret) {
8510                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8511                 goto err_mdiobus_unreg;
8512         }
8513
8514         ret = hclge_config_gro(hdev, true);
8515         if (ret)
8516                 goto err_mdiobus_unreg;
8517
8518         ret = hclge_init_vlan_config(hdev);
8519         if (ret) {
8520                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8521                 goto err_mdiobus_unreg;
8522         }
8523
8524         ret = hclge_tm_schd_init(hdev);
8525         if (ret) {
8526                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8527                 goto err_mdiobus_unreg;
8528         }
8529
8530         hclge_rss_init_cfg(hdev);
8531         ret = hclge_rss_init_hw(hdev);
8532         if (ret) {
8533                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8534                 goto err_mdiobus_unreg;
8535         }
8536
8537         ret = init_mgr_tbl(hdev);
8538         if (ret) {
8539                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8540                 goto err_mdiobus_unreg;
8541         }
8542
8543         ret = hclge_init_fd_config(hdev);
8544         if (ret) {
8545                 dev_err(&pdev->dev,
8546                         "fd table init fail, ret=%d\n", ret);
8547                 goto err_mdiobus_unreg;
8548         }
8549
8550         ret = hclge_hw_error_set_state(hdev, true);
8551         if (ret) {
8552                 dev_err(&pdev->dev,
8553                         "fail(%d) to enable hw error interrupts\n", ret);
8554                 goto err_mdiobus_unreg;
8555         }
8556
8557         INIT_KFIFO(hdev->mac_tnl_log);
8558
8559         hclge_dcb_ops_set(hdev);
8560
8561         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8562         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8563         INIT_WORK(&hdev->service_task, hclge_service_task);
8564         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8565         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8566
8567         hclge_clear_all_event_cause(hdev);
8568
8569         /* Enable MISC vector(vector0) */
8570         hclge_enable_vector(&hdev->misc_vector, true);
8571
8572         hclge_state_init(hdev);
8573         hdev->last_reset_time = jiffies;
8574
8575         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8576         return 0;
8577
8578 err_mdiobus_unreg:
8579         if (hdev->hw.mac.phydev)
8580                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8581 err_msi_irq_uninit:
8582         hclge_misc_irq_uninit(hdev);
8583 err_msi_uninit:
8584         pci_free_irq_vectors(pdev);
8585 err_cmd_uninit:
8586         hclge_cmd_uninit(hdev);
8587 err_pci_uninit:
8588         pcim_iounmap(pdev, hdev->hw.io_base);
8589         pci_clear_master(pdev);
8590         pci_release_regions(pdev);
8591         pci_disable_device(pdev);
8592 out:
8593         return ret;
8594 }
8595
8596 static void hclge_stats_clear(struct hclge_dev *hdev)
8597 {
8598         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8599 }
8600
8601 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8602 {
8603         struct hclge_vport *vport = hdev->vport;
8604         int i;
8605
8606         for (i = 0; i < hdev->num_alloc_vport; i++) {
8607                 hclge_vport_stop(vport);
8608                 vport++;
8609         }
8610 }
8611
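/* Re-initialize hardware state after a reset: command queue, TQP mapping,
 * MAC, VLAN, TM, RSS and flow director are reprogrammed, and the hardware
 * error interrupts (disabled by core/global reset) are re-enabled.
 */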
8612 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8613 {
8614         struct hclge_dev *hdev = ae_dev->priv;
8615         struct pci_dev *pdev = ae_dev->pdev;
8616         int ret;
8617
8618         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8619
8620         hclge_stats_clear(hdev);
8621         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8622
8623         ret = hclge_cmd_init(hdev);
8624         if (ret) {
8625                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8626                 return ret;
8627         }
8628
8629         ret = hclge_map_tqp(hdev);
8630         if (ret) {
8631                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8632                 return ret;
8633         }
8634
8635         hclge_reset_umv_space(hdev);
8636
8637         ret = hclge_mac_init(hdev);
8638         if (ret) {
8639                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8640                 return ret;
8641         }
8642
8643         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8644         if (ret) {
8645                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8646                 return ret;
8647         }
8648
8649         ret = hclge_config_gro(hdev, true);
8650         if (ret)
8651                 return ret;
8652
8653         ret = hclge_init_vlan_config(hdev);
8654         if (ret) {
8655                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8656                 return ret;
8657         }
8658
8659         ret = hclge_tm_init_hw(hdev, true);
8660         if (ret) {
8661                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8662                 return ret;
8663         }
8664
8665         ret = hclge_rss_init_hw(hdev);
8666         if (ret) {
8667                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8668                 return ret;
8669         }
8670
8671         ret = hclge_init_fd_config(hdev);
8672         if (ret) {
8673                 dev_err(&pdev->dev,
8674                         "fd table init fail, ret=%d\n", ret);
8675                 return ret;
8676         }
8677
8678         /* Re-enable the hw error interrupts because
8679          * the interrupts get disabled on core/global reset.
8680          */
8681         ret = hclge_hw_error_set_state(hdev, true);
8682         if (ret) {
8683                 dev_err(&pdev->dev,
8684                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8685                 return ret;
8686         }
8687
8688         hclge_reset_vport_state(hdev);
8689
8690         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8691                  HCLGE_DRIVER_NAME);
8692
8693         return 0;
8694 }
8695
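/* PF remove path: stop deferred work, unregister the MDIO bus if a PHY is
 * attached, disable the misc vector and hardware error interrupts, then
 * release command queue, IRQ and PCI resources.
 */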
8696 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8697 {
8698         struct hclge_dev *hdev = ae_dev->priv;
8699         struct hclge_mac *mac = &hdev->hw.mac;
8700
8701         hclge_state_uninit(hdev);
8702
8703         if (mac->phydev)
8704                 mdiobus_unregister(mac->mdio_bus);
8705
8706         hclge_uninit_umv_space(hdev);
8707
8708         /* Disable MISC vector(vector0) */
8709         hclge_enable_vector(&hdev->misc_vector, false);
8710         synchronize_irq(hdev->misc_vector.vector_irq);
8711
8712         hclge_config_mac_tnl_int(hdev, false);
8713         hclge_hw_error_set_state(hdev, false);
8714         hclge_cmd_uninit(hdev);
8715         hclge_misc_irq_uninit(hdev);
8716         hclge_pci_uninit(hdev);
8717         mutex_destroy(&hdev->vport_lock);
8718         hclge_uninit_vport_mac_table(hdev);
8719         hclge_uninit_vport_vlan_table(hdev);
8720         mutex_destroy(&hdev->vport_cfg_mutex);
8721         ae_dev->priv = NULL;
8722 }
8723
8724 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8725 {
8726         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8727         struct hclge_vport *vport = hclge_get_vport(handle);
8728         struct hclge_dev *hdev = vport->back;
8729
8730         return min_t(u32, hdev->rss_size_max,
8731                      vport->alloc_tqps / kinfo->num_tc);
8732 }
8733
8734 static void hclge_get_channels(struct hnae3_handle *handle,
8735                                struct ethtool_channels *ch)
8736 {
8737         ch->max_combined = hclge_get_max_channels(handle);
8738         ch->other_count = 1;
8739         ch->max_other = 1;
8740         ch->combined_count = handle->kinfo.rss_size;
8741 }
8742
8743 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8744                                         u16 *alloc_tqps, u16 *max_rss_size)
8745 {
8746         struct hclge_vport *vport = hclge_get_vport(handle);
8747         struct hclge_dev *hdev = vport->back;
8748
8749         *alloc_tqps = vport->alloc_tqps;
8750         *max_rss_size = hdev->rss_size_max;
8751 }
8752
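/* Change the number of combined channels: update the vport TQP mapping,
 * reprogram the RSS TC mode for the new rss_size and, unless the user has
 * configured the RSS indirection table, rebuild it with a default spread.
 */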
8753 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8754                               bool rxfh_configured)
8755 {
8756         struct hclge_vport *vport = hclge_get_vport(handle);
8757         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8758         struct hclge_dev *hdev = vport->back;
8759         int cur_rss_size = kinfo->rss_size;
8760         int cur_tqps = kinfo->num_tqps;
8761         u16 tc_offset[HCLGE_MAX_TC_NUM];
8762         u16 tc_valid[HCLGE_MAX_TC_NUM];
8763         u16 tc_size[HCLGE_MAX_TC_NUM];
8764         u16 roundup_size;
8765         u32 *rss_indir;
8766         int ret, i;
8767
8768         kinfo->req_rss_size = new_tqps_num;
8769
8770         ret = hclge_tm_vport_map_update(hdev);
8771         if (ret) {
8772                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8773                 return ret;
8774         }
8775
8776         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8777         roundup_size = ilog2(roundup_size);
8778         /* Set the RSS TC mode according to the new RSS size */
8779         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8780                 tc_valid[i] = 0;
8781
8782                 if (!(hdev->hw_tc_map & BIT(i)))
8783                         continue;
8784
8785                 tc_valid[i] = 1;
8786                 tc_size[i] = roundup_size;
8787                 tc_offset[i] = kinfo->rss_size * i;
8788         }
8789         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8790         if (ret)
8791                 return ret;
8792
8793         /* RSS indirection table has been configured by user */
8794         if (rxfh_configured)
8795                 goto out;
8796
8797         /* Reinitialize the RSS indirection table to match the new RSS size */
8798         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8799         if (!rss_indir)
8800                 return -ENOMEM;
8801
8802         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8803                 rss_indir[i] = i % kinfo->rss_size;
8804
8805         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8806         if (ret)
8807                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8808                         ret);
8809
8810         kfree(rss_indir);
8811
8812 out:
8813         if (!ret)
8814                 dev_info(&hdev->pdev->dev,
8815                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8816                          cur_rss_size, kinfo->rss_size,
8817                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8818
8819         return ret;
8820 }
8821
8822 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8823                               u32 *regs_num_64_bit)
8824 {
8825         struct hclge_desc desc;
8826         u32 total_num;
8827         int ret;
8828
8829         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8830         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8831         if (ret) {
8832                 dev_err(&hdev->pdev->dev,
8833                         "Query register number cmd failed, ret = %d.\n", ret);
8834                 return ret;
8835         }
8836
8837         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8838         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8839
8840         total_num = *regs_num_32_bit + *regs_num_64_bit;
8841         if (!total_num)
8842                 return -EINVAL;
8843
8844         return 0;
8845 }
8846
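/* Read the firmware-managed 32-bit registers: values arrive packed in
 * command descriptors; the first descriptor carries six values in its
 * data area, each following descriptor is treated entirely as payload.
 */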
8847 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8848                                  void *data)
8849 {
8850 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8851
8852         struct hclge_desc *desc;
8853         u32 *reg_val = data;
8854         __le32 *desc_data;
8855         int cmd_num;
8856         int i, k, n;
8857         int ret;
8858
8859         if (regs_num == 0)
8860                 return 0;
8861
8862         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8863         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8864         if (!desc)
8865                 return -ENOMEM;
8866
8867         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8868         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8869         if (ret) {
8870                 dev_err(&hdev->pdev->dev,
8871                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8872                 kfree(desc);
8873                 return ret;
8874         }
8875
8876         for (i = 0; i < cmd_num; i++) {
8877                 if (i == 0) {
8878                         desc_data = (__le32 *)(&desc[i].data[0]);
8879                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8880                 } else {
8881                         desc_data = (__le32 *)(&desc[i]);
8882                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8883                 }
8884                 for (k = 0; k < n; k++) {
8885                         *reg_val++ = le32_to_cpu(*desc_data++);
8886
8887                         regs_num--;
8888                         if (!regs_num)
8889                                 break;
8890                 }
8891         }
8892
8893         kfree(desc);
8894         return 0;
8895 }
8896
8897 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8898                                  void *data)
8899 {
8900 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8901
8902         struct hclge_desc *desc;
8903         u64 *reg_val = data;
8904         __le64 *desc_data;
8905         int cmd_num;
8906         int i, k, n;
8907         int ret;
8908
8909         if (regs_num == 0)
8910                 return 0;
8911
8912         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8913         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8914         if (!desc)
8915                 return -ENOMEM;
8916
8917         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8918         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8919         if (ret) {
8920                 dev_err(&hdev->pdev->dev,
8921                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8922                 kfree(desc);
8923                 return ret;
8924         }
8925
8926         for (i = 0; i < cmd_num; i++) {
8927                 if (i == 0) {
8928                         desc_data = (__le64 *)(&desc[i].data[0]);
8929                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8930                 } else {
8931                         desc_data = (__le64 *)(&desc[i]);
8932                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8933                 }
8934                 for (k = 0; k < n; k++) {
8935                         *reg_val++ = le64_to_cpu(*desc_data++);
8936
8937                         regs_num--;
8938                         if (!regs_num)
8939                                 break;
8940                 }
8941         }
8942
8943         kfree(desc);
8944         return 0;
8945 }
8946
8947 #define MAX_SEPARATE_NUM        4
8948 #define SEPARATOR_VALUE         0xFFFFFFFF
8949 #define REG_NUM_PER_LINE        4
8950 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8951
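/* Report the ethtool register dump length: the direct-access register
 * groups are padded to whole REG_LEN_PER_LINE lines, the ring and TQP
 * interrupt groups are repeated per queue/vector, and the 32/64-bit
 * register counts queried from firmware are added on top.
 */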
8952 static int hclge_get_regs_len(struct hnae3_handle *handle)
8953 {
8954         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8955         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8956         struct hclge_vport *vport = hclge_get_vport(handle);
8957         struct hclge_dev *hdev = vport->back;
8958         u32 regs_num_32_bit, regs_num_64_bit;
8959         int ret;
8960
8961         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8962         if (ret) {
8963                 dev_err(&hdev->pdev->dev,
8964                         "Get register number failed, ret = %d.\n", ret);
8965                 return -EOPNOTSUPP;
8966         }
8967
8968         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8969         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8970         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8971         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8972
8973         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8974                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8975                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8976 }
8977
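/* Fill the ethtool register dump: direct-access groups are read from the
 * PF register space with SEPARATOR_VALUE padding between groups, followed
 * by the 32-bit and 64-bit registers queried from firmware.
 */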
8978 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8979                            void *data)
8980 {
8981         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8982         struct hclge_vport *vport = hclge_get_vport(handle);
8983         struct hclge_dev *hdev = vport->back;
8984         u32 regs_num_32_bit, regs_num_64_bit;
8985         int i, j, reg_um, separator_num;
8986         u32 *reg = data;
8987         int ret;
8988
8989         *version = hdev->fw_version;
8990
8991         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8992         if (ret) {
8993                 dev_err(&hdev->pdev->dev,
8994                         "Get register number failed, ret = %d.\n", ret);
8995                 return;
8996         }
8997
8998         /* fetch per-PF register values from PF PCIe register space */
8999         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9000         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9001         for (i = 0; i < reg_um; i++)
9002                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9003         for (i = 0; i < separator_num; i++)
9004                 *reg++ = SEPARATOR_VALUE;
9005
9006         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9007         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9008         for (i = 0; i < reg_um; i++)
9009                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9010         for (i = 0; i < separator_num; i++)
9011                 *reg++ = SEPARATOR_VALUE;
9012
9013         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9014         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9015         for (j = 0; j < kinfo->num_tqps; j++) {
9016                 for (i = 0; i < reg_um; i++)
9017                         *reg++ = hclge_read_dev(&hdev->hw,
9018                                                 ring_reg_addr_list[i] +
9019                                                 0x200 * j);
9020                 for (i = 0; i < separator_num; i++)
9021                         *reg++ = SEPARATOR_VALUE;
9022         }
9023
9024         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9025         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9026         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9027                 for (i = 0; i < reg_um; i++)
9028                         *reg++ = hclge_read_dev(&hdev->hw,
9029                                                 tqp_intr_reg_addr_list[i] +
9030                                                 4 * j);
9031                 for (i = 0; i < separator_num; i++)
9032                         *reg++ = SEPARATOR_VALUE;
9033         }
9034
9035         /* fetch PF common register values from firmware */
9036         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9037         if (ret) {
9038                 dev_err(&hdev->pdev->dev,
9039                         "Get 32 bit register failed, ret = %d.\n", ret);
9040                 return;
9041         }
9042
9043         reg += regs_num_32_bit;
9044         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9045         if (ret)
9046                 dev_err(&hdev->pdev->dev,
9047                         "Get 64 bit register failed, ret = %d.\n", ret);
9048 }
9049
9050 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9051 {
9052         struct hclge_set_led_state_cmd *req;
9053         struct hclge_desc desc;
9054         int ret;
9055
9056         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9057
9058         req = (struct hclge_set_led_state_cmd *)desc.data;
9059         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9060                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9061
9062         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9063         if (ret)
9064                 dev_err(&hdev->pdev->dev,
9065                         "Send set led state cmd error, ret =%d\n", ret);
9066
9067         return ret;
9068 }
9069
9070 enum hclge_led_status {
9071         HCLGE_LED_OFF,
9072         HCLGE_LED_ON,
9073         HCLGE_LED_NO_CHANGE = 0xFF,
9074 };
9075
9076 static int hclge_set_led_id(struct hnae3_handle *handle,
9077                             enum ethtool_phys_id_state status)
9078 {
9079         struct hclge_vport *vport = hclge_get_vport(handle);
9080         struct hclge_dev *hdev = vport->back;
9081
9082         switch (status) {
9083         case ETHTOOL_ID_ACTIVE:
9084                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9085         case ETHTOOL_ID_INACTIVE:
9086                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9087         default:
9088                 return -EINVAL;
9089         }
9090 }
9091
9092 static void hclge_get_link_mode(struct hnae3_handle *handle,
9093                                 unsigned long *supported,
9094                                 unsigned long *advertising)
9095 {
9096         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9097         struct hclge_vport *vport = hclge_get_vport(handle);
9098         struct hclge_dev *hdev = vport->back;
9099         unsigned int idx = 0;
9100
9101         for (; idx < size; idx++) {
9102                 supported[idx] = hdev->hw.mac.supported[idx];
9103                 advertising[idx] = hdev->hw.mac.advertising[idx];
9104         }
9105 }
9106
9107 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9108 {
9109         struct hclge_vport *vport = hclge_get_vport(handle);
9110         struct hclge_dev *hdev = vport->back;
9111
9112         return hclge_config_gro(hdev, enable);
9113 }
9114
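/* Callback table exported to the hnae3 framework; client drivers reach
 * the PF hardware through these ops.
 */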
9115 static const struct hnae3_ae_ops hclge_ops = {
9116         .init_ae_dev = hclge_init_ae_dev,
9117         .uninit_ae_dev = hclge_uninit_ae_dev,
9118         .flr_prepare = hclge_flr_prepare,
9119         .flr_done = hclge_flr_done,
9120         .init_client_instance = hclge_init_client_instance,
9121         .uninit_client_instance = hclge_uninit_client_instance,
9122         .map_ring_to_vector = hclge_map_ring_to_vector,
9123         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9124         .get_vector = hclge_get_vector,
9125         .put_vector = hclge_put_vector,
9126         .set_promisc_mode = hclge_set_promisc_mode,
9127         .set_loopback = hclge_set_loopback,
9128         .start = hclge_ae_start,
9129         .stop = hclge_ae_stop,
9130         .client_start = hclge_client_start,
9131         .client_stop = hclge_client_stop,
9132         .get_status = hclge_get_status,
9133         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9134         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9135         .get_media_type = hclge_get_media_type,
9136         .check_port_speed = hclge_check_port_speed,
9137         .get_fec = hclge_get_fec,
9138         .set_fec = hclge_set_fec,
9139         .get_rss_key_size = hclge_get_rss_key_size,
9140         .get_rss_indir_size = hclge_get_rss_indir_size,
9141         .get_rss = hclge_get_rss,
9142         .set_rss = hclge_set_rss,
9143         .set_rss_tuple = hclge_set_rss_tuple,
9144         .get_rss_tuple = hclge_get_rss_tuple,
9145         .get_tc_size = hclge_get_tc_size,
9146         .get_mac_addr = hclge_get_mac_addr,
9147         .set_mac_addr = hclge_set_mac_addr,
9148         .do_ioctl = hclge_do_ioctl,
9149         .add_uc_addr = hclge_add_uc_addr,
9150         .rm_uc_addr = hclge_rm_uc_addr,
9151         .add_mc_addr = hclge_add_mc_addr,
9152         .rm_mc_addr = hclge_rm_mc_addr,
9153         .set_autoneg = hclge_set_autoneg,
9154         .get_autoneg = hclge_get_autoneg,
9155         .restart_autoneg = hclge_restart_autoneg,
9156         .get_pauseparam = hclge_get_pauseparam,
9157         .set_pauseparam = hclge_set_pauseparam,
9158         .set_mtu = hclge_set_mtu,
9159         .reset_queue = hclge_reset_tqp,
9160         .get_stats = hclge_get_stats,
9161         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9162         .update_stats = hclge_update_stats,
9163         .get_strings = hclge_get_strings,
9164         .get_sset_count = hclge_get_sset_count,
9165         .get_fw_version = hclge_get_fw_version,
9166         .get_mdix_mode = hclge_get_mdix_mode,
9167         .enable_vlan_filter = hclge_enable_vlan_filter,
9168         .set_vlan_filter = hclge_set_vlan_filter,
9169         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9170         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9171         .reset_event = hclge_reset_event,
9172         .set_default_reset_request = hclge_set_def_reset_request,
9173         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9174         .set_channels = hclge_set_channels,
9175         .get_channels = hclge_get_channels,
9176         .get_regs_len = hclge_get_regs_len,
9177         .get_regs = hclge_get_regs,
9178         .set_led_id = hclge_set_led_id,
9179         .get_link_mode = hclge_get_link_mode,
9180         .add_fd_entry = hclge_add_fd_entry,
9181         .del_fd_entry = hclge_del_fd_entry,
9182         .del_all_fd_entries = hclge_del_all_fd_entries,
9183         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9184         .get_fd_rule_info = hclge_get_fd_rule_info,
9185         .get_fd_all_rules = hclge_get_all_rules,
9186         .restore_fd_rules = hclge_restore_fd_entries,
9187         .enable_fd = hclge_enable_fd,
9188         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9189         .dbg_run_cmd = hclge_dbg_run_cmd,
9190         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9191         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9192         .ae_dev_resetting = hclge_ae_dev_resetting,
9193         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9194         .set_gro_en = hclge_gro_en,
9195         .get_global_queue_id = hclge_covert_handle_qid_global,
9196         .set_timer_task = hclge_set_timer_task,
9197         .mac_connect_phy = hclge_mac_connect_phy,
9198         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9199 };
9200
9201 static struct hnae3_ae_algo ae_algo = {
9202         .ops = &hclge_ops,
9203         .pdev_id_table = ae_algo_pci_tbl,
9204 };
9205
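/* Module entry points: register/unregister the PF algorithm with the
 * hnae3 framework, which matches it against the ae_algo_pci_tbl IDs.
 */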
9206 static int hclge_init(void)
9207 {
9208         pr_info("%s is initializing\n", HCLGE_NAME);
9209
9210         hnae3_register_ae_algo(&ae_algo);
9211
9212         return 0;
9213 }
9214
9215 static void hclge_exit(void)
9216 {
9217         hnae3_unregister_ae_algo(&ae_algo);
9218 }
9219 module_init(hclge_init);
9220 module_exit(hclge_exit);
9221
9222 MODULE_LICENSE("GPL");
9223 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9224 MODULE_DESCRIPTION("HCLGE Driver");
9225 MODULE_VERSION(HCLGE_MOD_VERSION);