1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
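/* Read the MAC statistics with the fixed-size HCLGE_OPC_STATS_MAC command
 * (21 descriptors) and accumulate every 64-bit counter into
 * hdev->hw_stats.mac_stats. Used as the fallback when the firmware does not
 * support the register-number query (see hclge_mac_update_stats()).
 */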
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0032, only the first desc has the head */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
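/* Same accumulation as above, but via HCLGE_OPC_STATS_MAC_ALL with a
 * caller-supplied descriptor count, for firmware that can report how many
 * MAC statistics registers it implements.
 */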
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0034, only the first desc has the head */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
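/* Ask the firmware how many MAC statistics registers exist and convert that
 * into a descriptor count: one descriptor covers the first 3 registers, each
 * additional descriptor covers 4 more (rounded up).
 */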
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
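/* Query the register number first; if that succeeds use the "complete"
 * method above, and fall back to the fixed 21-descriptor method when the
 * firmware returns -EOPNOTSUPP.
 */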
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
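/* Accumulate the per-queue RX and TX packet counters, one queue at a time,
 * via HCLGE_OPC_QUERY_RX_STATUS and HCLGE_OPC_QUERY_TX_STATUS.
 */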
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441         /* command : HCLGE_OPC_QUERY_RX_STATUS */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
505         return kinfo->num_tqps * 2;
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
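/* Generic helper: copy @size 64-bit counters out of @comm_stats into @data
 * using the field offsets recorded in @strs (see HCLGE_STATS_READ and
 * HCLGE_MAC_STATS_FIELD_OFF), returning the advanced buffer pointer.
 */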
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: only GE mode is supported
624          * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
625          * phy: only supported when a PHY device exists on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear the loopback bit flags first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
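/* Return -EINVAL until the PF state reports HCLGE_PF_STATE_DONE; otherwise
 * record whether this PF is the main PF.
 */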
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Set the pf to main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
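/* Read the PF resources: TQP count, packet/TX/DV buffer sizes and MSI-X
 * vectors. When RoCE is supported the vectors are split, NIC vectors first
 * and RoCE vectors starting at roce_base_msix_offset.
 */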
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* PF should have NIC vectors and Roce vectors,
803                  * NIC vectors are queued before Roce vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi  +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
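/* Advertise the FEC link modes and record fec_ability based on the current
 * speed: BaseR for 10G/40G, RS for 25G/50G/100G (25G/50G keep BaseR in the
 * ability mask as well), nothing for other speeds.
 */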
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to supporting all speeds for a GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }
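
/* Unpack the two HCLGE_OPC_GET_CFG_PARAM descriptors into struct hclge_cfg:
 * vmdq vport number, TC number, queue depth, PHY address, media type, RX
 * buffer length, MAC address, default speed, max RSS size, NUMA node map,
 * speed ability and UMV table space.
 */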
1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
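        /* param[2] holds the low 32 bits of the MAC address and param[3] the
         * upper 16 bits; the two-step shift (<< 31 then << 1) presumably just
         * avoids a shift-by-32 construct.
         */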
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query the static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be filled
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* Length must be in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* the minimum number of queue pairs equals the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
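/* Pull the static configuration into hdev (queue sizes, MAC address, media
 * type, speed, TC limits, UMV space), sanity-check tc_max, and apply the
 * kdump minimal-resource overrides.
 */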
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* non-contiguous TC configuration is currently not supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
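/* Program the TSO MSS range via HCLGE_OPC_TSO_GENERIC_CONFIG. Both min and
 * max are packed with the HCLGE_TSO_MSS_MIN_M/S mask into their own 16-bit
 * command fields.
 */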
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
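/* hclge_alloc_tqps: allocate the per-TQP bookkeeping array and initialize
 * each entry with its index, buffer size, descriptor numbers and the I/O
 * base address of its queue registers.
 */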
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
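/* hclge_map_tqps_to_func: map physical queue tqp_pid to virtual queue
 * tqp_vid of the given PF/VF function via the SET_TQP_MAP command.
 */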
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
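/* hclge_assign_tqp: hand out up to num_tqps unused hardware TQPs to the
 * vport and derive its RSS size from the number of queues per TC.
 */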
1370 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
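/* hclge_knic_setup: initialize the KNIC private info of the vport
 * (descriptor numbers, rx buffer length, TQP pointer array) and assign
 * hardware queues to it.
 */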
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         /* this would be initialized later */
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1480                 ret = hclge_knic_setup(vport, num_tqps,
1481                                        hdev->num_tx_desc, hdev->num_rx_desc);
1482
1483                 if (ret) {
1484                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485                                 ret);
1486                         return ret;
1487                 }
1488         } else {
1489                 hclge_unic_setup(vport, num_tqps);
1490         }
1491
1492         return 0;
1493 }
1494
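/* hclge_alloc_vport: allocate one vport for the PF plus one per VMDq
 * instance and requested VF, then distribute the TQPs evenly among them;
 * the main PF vport also receives the remainder.
 */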
1495 static int hclge_alloc_vport(struct hclge_dev *hdev)
1496 {
1497         struct pci_dev *pdev = hdev->pdev;
1498         struct hclge_vport *vport;
1499         u32 tqp_main_vport;
1500         u32 tqp_per_vport;
1501         int num_vport, i;
1502         int ret;
1503
1504         /* We need to alloc a vport for main NIC of PF */
1505         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1506
1507         if (hdev->num_tqps < num_vport) {
1508                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1509                         hdev->num_tqps, num_vport);
1510                 return -EINVAL;
1511         }
1512
1513         /* Alloc the same number of TQPs for every vport */
1514         tqp_per_vport = hdev->num_tqps / num_vport;
1515         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1516
1517         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518                              GFP_KERNEL);
1519         if (!vport)
1520                 return -ENOMEM;
1521
1522         hdev->vport = vport;
1523         hdev->num_alloc_vport = num_vport;
1524
1525         if (IS_ENABLED(CONFIG_PCI_IOV))
1526                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1527
1528         for (i = 0; i < num_vport; i++) {
1529                 vport->back = hdev;
1530                 vport->vport_id = i;
1531                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1532                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1533                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1534                 INIT_LIST_HEAD(&vport->vlan_list);
1535                 INIT_LIST_HEAD(&vport->uc_mac_list);
1536                 INIT_LIST_HEAD(&vport->mc_mac_list);
1537
1538                 if (i == 0)
1539                         ret = hclge_vport_setup(vport, tqp_main_vport);
1540                 else
1541                         ret = hclge_vport_setup(vport, tqp_per_vport);
1542                 if (ret) {
1543                         dev_err(&pdev->dev,
1544                                 "vport setup failed for vport %d, %d\n",
1545                                 i, ret);
1546                         return ret;
1547                 }
1548
1549                 vport++;
1550         }
1551
1552         return 0;
1553 }
1554
1555 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1556                                     struct hclge_pkt_buf_alloc *buf_alloc)
1557 {
1558 /* TX buffer size is allocated in units of 128 bytes */
1559 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1560 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1561         struct hclge_tx_buff_alloc_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564         u8 i;
1565
1566         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1569         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1571
1572                 req->tx_pkt_buff[i] =
1573                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1574                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1575         }
1576
1577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1578         if (ret)
1579                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1580                         ret);
1581
1582         return ret;
1583 }
1584
1585 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1586                                  struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1589
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592
1593         return ret;
1594 }
1595
1596 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1597 {
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1601                 if (hdev->hw_tc_map & BIT(i))
1602                         cnt++;
1603         return cnt;
1604 }
1605
1606 /* Get the number of PFC-enabled TCs that have a private buffer */
1607 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1608                                   struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of PFC-disabled TCs that have a private buffer */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         int i, cnt = 0;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (hdev->hw_tc_map & BIT(i) &&
1633                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634                     priv->enable)
1635                         cnt++;
1636         }
1637
1638         return cnt;
1639 }
1640
1641 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_priv_buf *priv;
1644         u32 rx_priv = 0;
1645         int i;
1646
1647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1648                 priv = &buf_alloc->priv_buf[i];
1649                 if (priv->enable)
1650                         rx_priv += priv->buf_size;
1651         }
1652         return rx_priv;
1653 }
1654
1655 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657         u32 i, total_tx_size = 0;
1658
1659         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1660                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1661
1662         return total_tx_size;
1663 }
1664
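/* hclge_is_rx_buf_ok: check whether the private rx buffers plus the
 * required shared buffer fit into rx_all; if so, fill in the shared
 * buffer size and the per-TC high/low thresholds.
 */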
1665 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1666                                 struct hclge_pkt_buf_alloc *buf_alloc,
1667                                 u32 rx_all)
1668 {
1669         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1670         u32 tc_num = hclge_get_tc_num(hdev);
1671         u32 shared_buf, aligned_mps;
1672         u32 rx_priv;
1673         int i;
1674
1675         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1676
1677         if (hnae3_dev_dcb_supported(hdev))
1678                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1679         else
1680                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1681                                         + hdev->dv_buf_size;
1682
1683         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1684         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1685                              HCLGE_BUF_SIZE_UNIT);
1686
1687         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1688         if (rx_all < rx_priv + shared_std)
1689                 return false;
1690
1691         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1692         buf_alloc->s_buf.buf_size = shared_buf;
1693         if (hnae3_dev_dcb_supported(hdev)) {
1694                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1695                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1696                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1697         } else {
1698                 buf_alloc->s_buf.self.high = aligned_mps +
1699                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1700                 buf_alloc->s_buf.self.low = aligned_mps;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 if (tc_num)
1705                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1706                 else
1707                         hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / 2;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
1724
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for all enabled tc */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
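/* hclge_rx_buf_calc_all: compute a private rx buffer and waterlines for
 * every enabled TC ("max" selects the larger waterline scheme), then
 * check that the remaining space can still hold the shared buffer.
 */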
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : 256;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1778                 }
1779
1780                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1781         }
1782
1783         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1784 }
1785
1786 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1787                                           struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1790         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1791         int i;
1792
1793         /* start clearing from the last TC */
1794         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i) &&
1798                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1799                         /* Clear the private buffer of a TC without PFC */
1800                         priv->wl.low = 0;
1801                         priv->wl.high = 0;
1802                         priv->buf_size = 0;
1803                         priv->enable = 0;
1804                         no_pfc_priv_num--;
1805                 }
1806
1807                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1808                     no_pfc_priv_num == 0)
1809                         break;
1810         }
1811
1812         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1813 }
1814
1815 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1816                                         struct hclge_pkt_buf_alloc *buf_alloc)
1817 {
1818         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1819         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1820         int i;
1821
1822         /* start clearing from the last TC */
1823         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1824                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1825
1826                 if (hdev->hw_tc_map & BIT(i) &&
1827                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1828                         /* Clear the private buffer of a PFC-enabled TC */
1829                         priv->wl.low = 0;
1830                         priv->enable = 0;
1831                         priv->wl.high = 0;
1832                         priv->buf_size = 0;
1833                         pfc_priv_num--;
1834                 }
1835
1836                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837                     pfc_priv_num == 0)
1838                         break;
1839         }
1840
1841         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1842 }
1843
1844 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1845  * @hdev: pointer to struct hclge_dev
1846  * @buf_alloc: pointer to buffer calculation data
1847  * @return: 0: calculation successful, negative: fail
1848  */
1849 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1850                                 struct hclge_pkt_buf_alloc *buf_alloc)
1851 {
1852         /* When DCB is not supported, rx private buffer is not allocated. */
1853         if (!hnae3_dev_dcb_supported(hdev)) {
1854                 u32 rx_all = hdev->pkt_buf_size;
1855
1856                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1857                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858                         return -ENOMEM;
1859
1860                 return 0;
1861         }
1862
1863         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1864                 return 0;
1865
1866         /* try to decrease the buffer size */
1867         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1868                 return 0;
1869
1870         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1871                 return 0;
1872
1873         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1874                 return 0;
1875
1876         return -ENOMEM;
1877 }
1878
1879 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1880                                    struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         struct hclge_rx_priv_buff_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         int i;
1886
1887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1888         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1889
1890         /* Allocate the private buffer for each TC */
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1893
1894                 req->buf_num[i] =
1895                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1896                 req->buf_num[i] |=
1897                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1898         }
1899
1900         req->shared_buf =
1901                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1902                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1903
1904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1905         if (ret)
1906                 dev_err(&hdev->pdev->dev,
1907                         "rx private buffer alloc cmd failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
1912 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1913                                    struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915         struct hclge_rx_priv_wl_buf *req;
1916         struct hclge_priv_buf *priv;
1917         struct hclge_desc desc[2];
1918         int i, j;
1919         int ret;
1920
1921         for (i = 0; i < 2; i++) {
1922                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1923                                            false);
1924                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1925
1926                 /* The first descriptor sets the NEXT bit to 1 */
1927                 if (i == 0)
1928                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1929                 else
1930                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1931
1932                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1933                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1934
1935                         priv = &buf_alloc->priv_buf[idx];
1936                         req->tc_wl[j].high =
1937                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].high |=
1939                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                         req->tc_wl[j].low =
1941                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1942                         req->tc_wl[j].low |=
1943                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1944                 }
1945         }
1946
1947         /* Send 2 descriptors at one time */
1948         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1949         if (ret)
1950                 dev_err(&hdev->pdev->dev,
1951                         "rx private waterline config cmd failed %d\n",
1952                         ret);
1953         return ret;
1954 }
1955
1956 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1957                                     struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1960         struct hclge_rx_com_thrd *req;
1961         struct hclge_desc desc[2];
1962         struct hclge_tc_thrd *tc;
1963         int i, j;
1964         int ret;
1965
1966         for (i = 0; i < 2; i++) {
1967                 hclge_cmd_setup_basic_desc(&desc[i],
1968                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1969                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1970
1971                 /* The first descriptor sets the NEXT bit to 1 */
1972                 if (i == 0)
1973                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1974                 else
1975                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1976
1977                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1978                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1979
1980                         req->com_thrd[j].high =
1981                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].high |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                         req->com_thrd[j].low =
1985                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1986                         req->com_thrd[j].low |=
1987                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1988                 }
1989         }
1990
1991         /* Send 2 descriptors at one time */
1992         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1993         if (ret)
1994                 dev_err(&hdev->pdev->dev,
1995                         "common threshold config cmd failed %d\n", ret);
1996         return ret;
1997 }
1998
1999 static int hclge_common_wl_config(struct hclge_dev *hdev,
2000                                   struct hclge_pkt_buf_alloc *buf_alloc)
2001 {
2002         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2003         struct hclge_rx_com_wl *req;
2004         struct hclge_desc desc;
2005         int ret;
2006
2007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2008
2009         req = (struct hclge_rx_com_wl *)desc.data;
2010         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2011         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012
2013         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2014         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2015
2016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2017         if (ret)
2018                 dev_err(&hdev->pdev->dev,
2019                         "common waterline config cmd failed %d\n", ret);
2020
2021         return ret;
2022 }
2023
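/* hclge_buffer_alloc: calculate and program the whole packet buffer
 * layout: tx buffers per TC, rx private buffers, rx private waterlines
 * and common thresholds (DCB only), and the common waterline.
 */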
2024 int hclge_buffer_alloc(struct hclge_dev *hdev)
2025 {
2026         struct hclge_pkt_buf_alloc *pkt_buf;
2027         int ret;
2028
2029         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2030         if (!pkt_buf)
2031                 return -ENOMEM;
2032
2033         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc tx buffer size for all TCs %d\n", ret);
2037                 goto out;
2038         }
2039
2040         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2041         if (ret) {
2042                 dev_err(&hdev->pdev->dev,
2043                         "could not alloc tx buffers %d\n", ret);
2044                 goto out;
2045         }
2046
2047         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2048         if (ret) {
2049                 dev_err(&hdev->pdev->dev,
2050                         "could not calc rx priv buffer size for all TCs %d\n",
2051                         ret);
2052                 goto out;
2053         }
2054
2055         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058                         ret);
2059                 goto out;
2060         }
2061
2062         if (hnae3_dev_dcb_supported(hdev)) {
2063                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2064                 if (ret) {
2065                         dev_err(&hdev->pdev->dev,
2066                                 "could not configure rx private waterline %d\n",
2067                                 ret);
2068                         goto out;
2069                 }
2070
2071                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2072                 if (ret) {
2073                         dev_err(&hdev->pdev->dev,
2074                                 "could not configure common threshold %d\n",
2075                                 ret);
2076                         goto out;
2077                 }
2078         }
2079
2080         ret = hclge_common_wl_config(hdev, pkt_buf);
2081         if (ret)
2082                 dev_err(&hdev->pdev->dev,
2083                         "could not configure common waterline %d\n", ret);
2084
2085 out:
2086         kfree(pkt_buf);
2087         return ret;
2088 }
2089
2090 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2091 {
2092         struct hnae3_handle *roce = &vport->roce;
2093         struct hnae3_handle *nic = &vport->nic;
2094
2095         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2096
2097         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098             vport->back->num_msi_left == 0)
2099                 return -EINVAL;
2100
2101         roce->rinfo.base_vector = vport->back->roce_base_vector;
2102
2103         roce->rinfo.netdev = nic->kinfo.netdev;
2104         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2105
2106         roce->pdev = nic->pdev;
2107         roce->ae_algo = nic->ae_algo;
2108         roce->numa_node_mask = nic->numa_node_mask;
2109
2110         return 0;
2111 }
2112
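/* hclge_init_msi: allocate MSI/MSI-X vectors for the PF and set up the
 * vector status and vector-to-irq bookkeeping arrays.
 */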
2113 static int hclge_init_msi(struct hclge_dev *hdev)
2114 {
2115         struct pci_dev *pdev = hdev->pdev;
2116         int vectors;
2117         int i;
2118
2119         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2120                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2121         if (vectors < 0) {
2122                 dev_err(&pdev->dev,
2123                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2124                         vectors);
2125                 return vectors;
2126         }
2127         if (vectors < hdev->num_msi)
2128                 dev_warn(&hdev->pdev->dev,
2129                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2130                          hdev->num_msi, vectors);
2131
2132         hdev->num_msi = vectors;
2133         hdev->num_msi_left = vectors;
2134         hdev->base_msi_vector = pdev->irq;
2135         hdev->roce_base_vector = hdev->base_msi_vector +
2136                                 hdev->roce_base_msix_offset;
2137
2138         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2139                                            sizeof(u16), GFP_KERNEL);
2140         if (!hdev->vector_status) {
2141                 pci_free_irq_vectors(pdev);
2142                 return -ENOMEM;
2143         }
2144
2145         for (i = 0; i < hdev->num_msi; i++)
2146                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2147
2148         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2149                                         sizeof(int), GFP_KERNEL);
2150         if (!hdev->vector_irq) {
2151                 pci_free_irq_vectors(pdev);
2152                 return -ENOMEM;
2153         }
2154
2155         return 0;
2156 }
2157
2158 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2159 {
2160
2161         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2162                 duplex = HCLGE_MAC_FULL;
2163
2164         return duplex;
2165 }
2166
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2168                                       u8 duplex)
2169 {
2170         struct hclge_config_mac_speed_dup_cmd *req;
2171         struct hclge_desc desc;
2172         int ret;
2173
2174         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2175
2176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2177
2178         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2179
2180         switch (speed) {
2181         case HCLGE_MAC_SPEED_10M:
2182                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183                                 HCLGE_CFG_SPEED_S, 6);
2184                 break;
2185         case HCLGE_MAC_SPEED_100M:
2186                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187                                 HCLGE_CFG_SPEED_S, 7);
2188                 break;
2189         case HCLGE_MAC_SPEED_1G:
2190                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191                                 HCLGE_CFG_SPEED_S, 0);
2192                 break;
2193         case HCLGE_MAC_SPEED_10G:
2194                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195                                 HCLGE_CFG_SPEED_S, 1);
2196                 break;
2197         case HCLGE_MAC_SPEED_25G:
2198                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199                                 HCLGE_CFG_SPEED_S, 2);
2200                 break;
2201         case HCLGE_MAC_SPEED_40G:
2202                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203                                 HCLGE_CFG_SPEED_S, 3);
2204                 break;
2205         case HCLGE_MAC_SPEED_50G:
2206                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207                                 HCLGE_CFG_SPEED_S, 4);
2208                 break;
2209         case HCLGE_MAC_SPEED_100G:
2210                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211                                 HCLGE_CFG_SPEED_S, 5);
2212                 break;
2213         default:
2214                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215                 return -EINVAL;
2216         }
2217
2218         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2219                       1);
2220
2221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222         if (ret) {
2223                 dev_err(&hdev->pdev->dev,
2224                         "mac speed/duplex config cmd failed %d.\n", ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 {
2233         int ret;
2234
2235         duplex = hclge_check_speed_dup(duplex, speed);
2236         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2237                 return 0;
2238
2239         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240         if (ret)
2241                 return ret;
2242
2243         hdev->hw.mac.speed = speed;
2244         hdev->hw.mac.duplex = duplex;
2245
2246         return 0;
2247 }
2248
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2250                                      u8 duplex)
2251 {
2252         struct hclge_vport *vport = hclge_get_vport(handle);
2253         struct hclge_dev *hdev = vport->back;
2254
2255         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2256 }
2257
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2259 {
2260         struct hclge_config_auto_neg_cmd *req;
2261         struct hclge_desc desc;
2262         u32 flag = 0;
2263         int ret;
2264
2265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2266
2267         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2270
2271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2272         if (ret)
2273                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274                         ret);
2275
2276         return ret;
2277 }
2278
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2280 {
2281         struct hclge_vport *vport = hclge_get_vport(handle);
2282         struct hclge_dev *hdev = vport->back;
2283
2284         if (!hdev->hw.mac.support_autoneg) {
2285                 if (enable) {
2286                         dev_err(&hdev->pdev->dev,
2287                                 "autoneg is not supported by current port\n");
2288                         return -EOPNOTSUPP;
2289                 } else {
2290                         return 0;
2291                 }
2292         }
2293
2294         return hclge_set_autoneg_en(hdev, enable);
2295 }
2296
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2298 {
2299         struct hclge_vport *vport = hclge_get_vport(handle);
2300         struct hclge_dev *hdev = vport->back;
2301         struct phy_device *phydev = hdev->hw.mac.phydev;
2302
2303         if (phydev)
2304                 return phydev->autoneg;
2305
2306         return hdev->hw.mac.autoneg;
2307 }
2308
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2310 {
2311         struct hclge_vport *vport = hclge_get_vport(handle);
2312         struct hclge_dev *hdev = vport->back;
2313         int ret;
2314
2315         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2316
2317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2318         if (ret)
2319                 return ret;
2320         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2321 }
2322
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2324 {
2325         struct hclge_config_fec_cmd *req;
2326         struct hclge_desc desc;
2327         int ret;
2328
2329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2330
2331         req = (struct hclge_config_fec_cmd *)desc.data;
2332         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334         if (fec_mode & BIT(HNAE3_FEC_RS))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337         if (fec_mode & BIT(HNAE3_FEC_BASER))
2338                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2340
2341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2342         if (ret)
2343                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344
2345         return ret;
2346 }
2347
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2349 {
2350         struct hclge_vport *vport = hclge_get_vport(handle);
2351         struct hclge_dev *hdev = vport->back;
2352         struct hclge_mac *mac = &hdev->hw.mac;
2353         int ret;
2354
2355         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357                 return -EINVAL;
2358         }
2359
2360         ret = hclge_set_fec_hw(hdev, fec_mode);
2361         if (ret)
2362                 return ret;
2363
2364         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365         return 0;
2366 }
2367
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2369                           u8 *fec_mode)
2370 {
2371         struct hclge_vport *vport = hclge_get_vport(handle);
2372         struct hclge_dev *hdev = vport->back;
2373         struct hclge_mac *mac = &hdev->hw.mac;
2374
2375         if (fec_ability)
2376                 *fec_ability = mac->fec_ability;
2377         if (fec_mode)
2378                 *fec_mode = mac->fec_mode;
2379 }
2380
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2382 {
2383         struct hclge_mac *mac = &hdev->hw.mac;
2384         int ret;
2385
2386         hdev->support_sfp_query = true;
2387         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389                                          hdev->hw.mac.duplex);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "Config mac speed dup fail ret=%d\n", ret);
2393                 return ret;
2394         }
2395
2396         mac->link = 0;
2397
2398         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2400                 if (ret) {
2401                         dev_err(&hdev->pdev->dev,
2402                                 "Fec mode init fail, ret = %d\n", ret);
2403                         return ret;
2404                 }
2405         }
2406
2407         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2408         if (ret) {
2409                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410                 return ret;
2411         }
2412
2413         ret = hclge_buffer_alloc(hdev);
2414         if (ret)
2415                 dev_err(&hdev->pdev->dev,
2416                         "allocate buffer fail, ret=%d\n", ret);
2417
2418         return ret;
2419 }
2420
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425                 schedule_work(&hdev->mbx_service_task);
2426 }
2427
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2429 {
2430         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431                 schedule_work(&hdev->rst_service_task);
2432 }
2433
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2435 {
2436         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439                 (void)schedule_work(&hdev->service_task);
2440 }
2441
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2443 {
2444         struct hclge_link_status_cmd *req;
2445         struct hclge_desc desc;
2446         int link_status;
2447         int ret;
2448
2449         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2451         if (ret) {
2452                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2453                         ret);
2454                 return ret;
2455         }
2456
2457         req = (struct hclge_link_status_cmd *)desc.data;
2458         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2459
2460         return !!link_status;
2461 }
2462
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2464 {
2465         int mac_state;
2466         int link_stat;
2467
2468         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2469                 return 0;
2470
2471         mac_state = hclge_get_mac_link_status(hdev);
2472
2473         if (hdev->hw.mac.phydev) {
2474                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475                         link_stat = mac_state &
2476                                 hdev->hw.mac.phydev->link;
2477                 else
2478                         link_stat = 0;
2479
2480         } else {
2481                 link_stat = mac_state;
2482         }
2483
2484         return !!link_stat;
2485 }
2486
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2488 {
2489         struct hnae3_client *rclient = hdev->roce_client;
2490         struct hnae3_client *client = hdev->nic_client;
2491         struct hnae3_handle *rhandle;
2492         struct hnae3_handle *handle;
2493         int state;
2494         int i;
2495
2496         if (!client)
2497                 return;
2498         state = hclge_get_mac_phy_link(hdev);
2499         if (state != hdev->hw.mac.link) {
2500                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501                         handle = &hdev->vport[i].nic;
2502                         client->ops->link_status_change(handle, state);
2503                         hclge_config_mac_tnl_int(hdev, state);
2504                         rhandle = &hdev->vport[i].roce;
2505                         if (rclient && rclient->ops->link_status_change)
2506                                 rclient->ops->link_status_change(rhandle,
2507                                                                  state);
2508                 }
2509                 hdev->hw.mac.link = state;
2510         }
2511 }
2512
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2514 {
2515         /* update fec ability by speed */
2516         hclge_convert_setting_fec(mac);
2517
2518         /* firmware cannot identify the backplane type; the media type
2519          * read from the configuration can help to handle it
2520          */
2521         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2526
2527         if (mac->support_autoneg) {
2528                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529                 linkmode_copy(mac->advertising, mac->supported);
2530         } else {
2531                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2532                                    mac->supported);
2533                 linkmode_zero(mac->advertising);
2534         }
2535 }
2536
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2538 {
2539         struct hclge_sfp_info_cmd *resp = NULL;
2540         struct hclge_desc desc;
2541         int ret;
2542
2543         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544         resp = (struct hclge_sfp_info_cmd *)desc.data;
2545         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546         if (ret == -EOPNOTSUPP) {
2547                 dev_warn(&hdev->pdev->dev,
2548                          "IMP does not support getting SFP speed %d\n", ret);
2549                 return ret;
2550         } else if (ret) {
2551                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2552                 return ret;
2553         }
2554
2555         *speed = le32_to_cpu(resp->speed);
2556
2557         return 0;
2558 }
2559
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2561 {
2562         struct hclge_sfp_info_cmd *resp;
2563         struct hclge_desc desc;
2564         int ret;
2565
2566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567         resp = (struct hclge_sfp_info_cmd *)desc.data;
2568
2569         resp->query_type = QUERY_ACTIVE_SPEED;
2570
2571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572         if (ret == -EOPNOTSUPP) {
2573                 dev_warn(&hdev->pdev->dev,
2574                          "IMP does not support getting SFP info %d\n", ret);
2575                 return ret;
2576         } else if (ret) {
2577                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2578                 return ret;
2579         }
2580
2581         mac->speed = le32_to_cpu(resp->speed);
2582         /* if resp->speed_ability is 0, it means the firmware is an old
2583          * version, so do not update these params
2584          */
2585         if (resp->speed_ability) {
2586                 mac->module_type = le32_to_cpu(resp->module_type);
2587                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588                 mac->autoneg = resp->autoneg;
2589                 mac->support_autoneg = resp->autoneg_ability;
2590                 if (!resp->active_fec)
2591                         mac->fec_mode = 0;
2592                 else
2593                         mac->fec_mode = BIT(resp->active_fec);
2594         } else {
2595                 mac->speed_type = QUERY_SFP_SPEED;
2596         }
2597
2598         return 0;
2599 }
2600
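/* hclge_update_port_info: refresh the port speed and capability from the
 * SFP module (active-speed query on revision >= 0x21 hardware) and apply
 * the result to the MAC configuration.
 */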
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2602 {
2603         struct hclge_mac *mac = &hdev->hw.mac;
2604         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2605         int ret;
2606
2607         /* get the port info from SFP cmd if not copper port */
2608         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2609                 return 0;
2610
2611         /* if IMP does not support getting SFP/qSFP info, return directly */
2612         if (!hdev->support_sfp_query)
2613                 return 0;
2614
2615         if (hdev->pdev->revision >= 0x21)
2616                 ret = hclge_get_sfp_info(hdev, mac);
2617         else
2618                 ret = hclge_get_sfp_speed(hdev, &speed);
2619
2620         if (ret == -EOPNOTSUPP) {
2621                 hdev->support_sfp_query = false;
2622                 return ret;
2623         } else if (ret) {
2624                 return ret;
2625         }
2626
2627         if (hdev->pdev->revision >= 0x21) {
2628                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629                         hclge_update_port_capability(mac);
2630                         return 0;
2631                 }
2632                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2633                                                HCLGE_MAC_FULL);
2634         } else {
2635                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636                         return 0; /* do nothing if no SFP */
2637
2638                 /* must config full duplex for SFP */
2639                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2640         }
2641 }
2642
2643 static int hclge_get_status(struct hnae3_handle *handle)
2644 {
2645         struct hclge_vport *vport = hclge_get_vport(handle);
2646         struct hclge_dev *hdev = vport->back;
2647
2648         hclge_update_link_status(hdev);
2649
2650         return hdev->hw.mac.link;
2651 }
2652
2653 static void hclge_service_timer(struct timer_list *t)
2654 {
2655         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2656
2657         mod_timer(&hdev->service_timer, jiffies + HZ);
2658         hdev->hw_stats.stats_timer++;
2659         hdev->fd_arfs_expire_timer++;
2660         hclge_task_schedule(hdev);
2661 }
2662
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2664 {
2665         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2666
2667         /* Flush memory before next watchdog */
2668         smp_mb__before_atomic();
2669         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2670 }
2671
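/* hclge_check_event_cause: read the vector0 interrupt sources and decode
 * them, in priority order, into a reset, MSI-X error, mailbox or other
 * event; *clearval returns the bits to clear for reset/mailbox events.
 */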
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2673 {
2674         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2675
2676         /* fetch the events from their corresponding regs */
2677         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679         msix_src_reg = hclge_read_dev(&hdev->hw,
2680                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2681
2682         /* Assumption: if reset and mailbox events are reported together,
2683          * only the reset event is processed in this pass and the mailbox
2684          * processing is deferred. Since the RX CMDQ event is not cleared
2685          * this time, the hardware will raise another interrupt just for
2686          * the mailbox.
2687          */
2688
2689         /* check for vector0 reset event sources */
2690         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695                 hdev->rst_stats.imp_rst_cnt++;
2696                 return HCLGE_VECTOR0_EVENT_RST;
2697         }
2698
2699         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704                 hdev->rst_stats.global_rst_cnt++;
2705                 return HCLGE_VECTOR0_EVENT_RST;
2706         }
2707
2708         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713                 hdev->rst_stats.core_rst_cnt++;
2714                 return HCLGE_VECTOR0_EVENT_RST;
2715         }
2716
2717         /* check for vector0 msix event source */
2718         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2720                         msix_src_reg);
2721                 return HCLGE_VECTOR0_EVENT_ERR;
2722         }
2723
2724         /* check for vector0 mailbox(=CMDQ RX) event source */
2725         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727                 *clearval = cmdq_src_reg;
2728                 return HCLGE_VECTOR0_EVENT_MBX;
2729         }
2730
2731         /* print other vector0 event source */
2732         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733                 cmdq_src_reg, msix_src_reg);
2734         return HCLGE_VECTOR0_EVENT_OTHER;
2735 }
2736
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2738                                     u32 regclr)
2739 {
2740         switch (event_type) {
2741         case HCLGE_VECTOR0_EVENT_RST:
2742                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2743                 break;
2744         case HCLGE_VECTOR0_EVENT_MBX:
2745                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2746                 break;
2747         default:
2748                 break;
2749         }
2750 }
2751
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2753 {
2754         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2759 }
2760
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2762 {
2763         writel(enable ? 1 : 0, vector->addr);
2764 }
2765
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2767 {
2768         struct hclge_dev *hdev = data;
2769         u32 event_cause;
2770         u32 clearval;
2771
2772         hclge_enable_vector(&hdev->misc_vector, false);
2773         event_cause = hclge_check_event_cause(hdev, &clearval);
2774
2775         /* vector 0 interrupt is shared with reset and mailbox source events. */
2776         switch (event_cause) {
2777         case HCLGE_VECTOR0_EVENT_ERR:
2778                 /* We do not know what type of reset is required now. This could
2779                  * only be decided after we fetch the type of errors which
2780                  * caused this event. Therefore, we will do the following for now:
2781                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782                  *    have a deferred type of reset to be used.
2783                  * 2. Schedule the reset service task.
2784                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
2785                  *    will fetch the correct type of reset. This would be done
2786                  *    by first decoding the types of errors.
2787                  */
2788                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2789                 /* fall through */
2790         case HCLGE_VECTOR0_EVENT_RST:
2791                 hclge_reset_task_schedule(hdev);
2792                 break;
2793         case HCLGE_VECTOR0_EVENT_MBX:
2794                 /* If we are here, then either:
2795                  * 1. we are not handling any mbx task and we are not scheduled
2796                  *    as well,
2797                  *                        OR
2798                  * 2. we could be handling an mbx task but nothing more is
2799                  *    scheduled.
2800                  * In both cases, we should schedule the mbx task as there are
2801                  * more mbx messages reported by this interrupt.
2802                  */
2803                 hclge_mbx_task_schedule(hdev);
2804                 break;
2805         default:
2806                 dev_warn(&hdev->pdev->dev,
2807                          "received unknown or unhandled event of vector0\n");
2808                 break;
2809         }
2810
2811         /* clear the source of the interrupt if it is not caused by reset */
2812         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813                 hclge_clear_event_cause(hdev, event_cause, clearval);
2814                 hclge_enable_vector(&hdev->misc_vector, true);
2815         }
2816
2817         return IRQ_HANDLED;
2818 }
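/* Illustrative summary (not part of the driver) of how the misc vector gets
 * re-enabled after hclge_misc_irq_handle() masks it: only mailbox events
 * unmask it immediately above; for reset events the unmask happens later in
 * hclge_clear_reset_cause(), and for MSI-X error events it happens in
 * hclge_get_reset_level() once the error source has been handled:
 *
 *      hclge_enable_vector(&hdev->misc_vector, false);  // masked in hard IRQ
 *      // MBX: clear cause and unmask right here
 *      // RST: unmask in hclge_clear_reset_cause()
 *      // ERR: unmask in hclge_get_reset_level()
 */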
2819
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2821 {
2822         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823                 dev_warn(&hdev->pdev->dev,
2824                          "vector(vector_id %d) has been freed.\n", vector_id);
2825                 return;
2826         }
2827
2828         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829         hdev->num_msi_left += 1;
2830         hdev->num_msi_used -= 1;
2831 }
2832
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2834 {
2835         struct hclge_misc_vector *vector = &hdev->misc_vector;
2836
2837         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2838
2839         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840         hdev->vector_status[0] = 0;
2841
2842         hdev->num_msi_left -= 1;
2843         hdev->num_msi_used += 1;
2844 }
2845
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2847 {
2848         int ret;
2849
2850         hclge_get_misc_vector(hdev);
2851
2852         /* this would be freed explicitly in hclge_misc_irq_uninit() */
2853         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854                           0, "hclge_misc", hdev);
2855         if (ret) {
2856                 hclge_free_vector(hdev, 0);
2857                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858                         hdev->misc_vector.vector_irq);
2859         }
2860
2861         return ret;
2862 }
2863
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2865 {
2866         free_irq(hdev->misc_vector.vector_irq, hdev);
2867         hclge_free_vector(hdev, 0);
2868 }
2869
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871                         enum hnae3_reset_notify_type type)
2872 {
2873         struct hnae3_client *client = hdev->nic_client;
2874         u16 i;
2875
2876         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2877             !client)
2878                 return 0;
2879
2880         if (!client->ops->reset_notify)
2881                 return -EOPNOTSUPP;
2882
2883         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2884                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2885                 int ret;
2886
2887                 ret = client->ops->reset_notify(handle, type);
2888                 if (ret) {
2889                         dev_err(&hdev->pdev->dev,
2890                                 "notify nic client failed %d(%d)\n", type, ret);
2891                         return ret;
2892                 }
2893         }
2894
2895         return 0;
2896 }
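/* Illustrative sketch (not part of the driver): during a reset, hclge_reset()
 * below drives the NIC client through the notify helper above in roughly this
 * order (the RoCE client gets the equivalent DOWN/UNINIT/INIT/UP calls):
 *
 *      hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
 *      hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
 *      // hardware reset completes, then hclge_reset_ae_dev()
 *      hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
 *      hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
 *      hclge_notify_client(hdev, HNAE3_UP_CLIENT);
 */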
2897
2898 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2899                                     enum hnae3_reset_notify_type type)
2900 {
2901         struct hnae3_client *client = hdev->roce_client;
2902         int ret = 0;
2903         u16 i;
2904
2905         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2906             !client)
2907                 return 0;
2908
2909         if (!client->ops->reset_notify)
2910                 return -EOPNOTSUPP;
2911
2912         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2913                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2914
2915                 ret = client->ops->reset_notify(handle, type);
2916                 if (ret) {
2917                         dev_err(&hdev->pdev->dev,
2918                                 "notify roce client failed %d(%d)",
2919                                 type, ret);
2920                         return ret;
2921                 }
2922         }
2923
2924         return ret;
2925 }
2926
2927 static int hclge_reset_wait(struct hclge_dev *hdev)
2928 {
2929 #define HCLGE_RESET_WATI_MS     100
2930 #define HCLGE_RESET_WAIT_CNT    200
2931         u32 val, reg, reg_bit;
2932         u32 cnt = 0;
2933
2934         switch (hdev->reset_type) {
2935         case HNAE3_IMP_RESET:
2936                 reg = HCLGE_GLOBAL_RESET_REG;
2937                 reg_bit = HCLGE_IMP_RESET_BIT;
2938                 break;
2939         case HNAE3_GLOBAL_RESET:
2940                 reg = HCLGE_GLOBAL_RESET_REG;
2941                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2942                 break;
2943         case HNAE3_CORE_RESET:
2944                 reg = HCLGE_GLOBAL_RESET_REG;
2945                 reg_bit = HCLGE_CORE_RESET_BIT;
2946                 break;
2947         case HNAE3_FUNC_RESET:
2948                 reg = HCLGE_FUN_RST_ING;
2949                 reg_bit = HCLGE_FUN_RST_ING_B;
2950                 break;
2951         case HNAE3_FLR_RESET:
2952                 break;
2953         default:
2954                 dev_err(&hdev->pdev->dev,
2955                         "Wait for unsupported reset type: %d\n",
2956                         hdev->reset_type);
2957                 return -EINVAL;
2958         }
2959
2960         if (hdev->reset_type == HNAE3_FLR_RESET) {
2961                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2962                        cnt++ < HCLGE_RESET_WAIT_CNT)
2963                         msleep(HCLGE_RESET_WATI_MS);
2964
2965                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2966                         dev_err(&hdev->pdev->dev,
2967                                 "flr wait timeout: %d\n", cnt);
2968                         return -EBUSY;
2969                 }
2970
2971                 return 0;
2972         }
2973
2974         val = hclge_read_dev(&hdev->hw, reg);
2975         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2976                 msleep(HCLGE_RESET_WATI_MS);
2977                 val = hclge_read_dev(&hdev->hw, reg);
2978                 cnt++;
2979         }
2980
2981         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2982                 dev_warn(&hdev->pdev->dev,
2983                          "Wait for reset timeout: %d\n", hdev->reset_type);
2984                 return -EBUSY;
2985         }
2986
2987         return 0;
2988 }
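/* Worked example (illustrative): the poll loop above budgets at most
 * HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WATI_MS = 200 * 100 ms, i.e. about
 * 20 seconds, for hardware to clear the reset-status bit; on timeout -EBUSY
 * is returned and hclge_reset() escalates via hclge_reset_err_handle().
 */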
2989
2990 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2991 {
2992         struct hclge_vf_rst_cmd *req;
2993         struct hclge_desc desc;
2994
2995         req = (struct hclge_vf_rst_cmd *)desc.data;
2996         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2997         req->dest_vfid = func_id;
2998
2999         if (reset)
3000                 req->vf_rst = 0x1;
3001
3002         return hclge_cmd_send(&hdev->hw, &desc, 1);
3003 }
3004
3005 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3006 {
3007         int i;
3008
3009         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3010                 struct hclge_vport *vport = &hdev->vport[i];
3011                 int ret;
3012
3013                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3014                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3015                 if (ret) {
3016                         dev_err(&hdev->pdev->dev,
3017                                 "set vf(%d) rst failed %d!\n",
3018                                 vport->vport_id, ret);
3019                         return ret;
3020                 }
3021
3022                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3023                         continue;
3024
3025                 /* Inform VF to process the reset.
3026                  * hclge_inform_reset_assert_to_vf may fail if VF
3027                  * driver is not loaded.
3028                  */
3029                 ret = hclge_inform_reset_assert_to_vf(vport);
3030                 if (ret)
3031                         dev_warn(&hdev->pdev->dev,
3032                                  "inform reset to vf(%d) failed %d!\n",
3033                                  vport->vport_id, ret);
3034         }
3035
3036         return 0;
3037 }
3038
3039 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3040 {
3041         struct hclge_desc desc;
3042         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3043         int ret;
3044
3045         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3046         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3047         req->fun_reset_vfid = func_id;
3048
3049         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3050         if (ret)
3051                 dev_err(&hdev->pdev->dev,
3052                         "send function reset cmd fail, status =%d\n", ret);
3053
3054         return ret;
3055 }
3056
3057 static void hclge_do_reset(struct hclge_dev *hdev)
3058 {
3059         struct hnae3_handle *handle = &hdev->vport[0].nic;
3060         struct pci_dev *pdev = hdev->pdev;
3061         u32 val;
3062
3063         if (hclge_get_hw_reset_stat(handle)) {
3064                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3065                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3066                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3067                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3068                 return;
3069         }
3070
3071         switch (hdev->reset_type) {
3072         case HNAE3_GLOBAL_RESET:
3073                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3074                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3075                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3076                 dev_info(&pdev->dev, "Global Reset requested\n");
3077                 break;
3078         case HNAE3_CORE_RESET:
3079                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3080                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3081                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3082                 dev_info(&pdev->dev, "Core Reset requested\n");
3083                 break;
3084         case HNAE3_FUNC_RESET:
3085                 dev_info(&pdev->dev, "PF Reset requested\n");
3086                 /* schedule again to check later */
3087                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3088                 hclge_reset_task_schedule(hdev);
3089                 break;
3090         case HNAE3_FLR_RESET:
3091                 dev_info(&pdev->dev, "FLR requested\n");
3092                 /* schedule again to check later */
3093                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3094                 hclge_reset_task_schedule(hdev);
3095                 break;
3096         default:
3097                 dev_warn(&pdev->dev,
3098                          "Unsupported reset type: %d\n", hdev->reset_type);
3099                 break;
3100         }
3101 }
3102
3103 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3104                                                    unsigned long *addr)
3105 {
3106         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3107
3108         /* first, resolve any unknown reset type to the known type(s) */
3109         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3110                 /* We will intentionally ignore any errors from this function
3111                  * as we will end up in *some* reset request in any case
3112                  */
3113                 hclge_handle_hw_msix_error(hdev, addr);
3114                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3115                 /* We deferred the clearing of the error event which caused the
3116                  * interrupt, since it was not possible to do that in interrupt
3117                  * context (and this is the reason we introduced the new UNKNOWN
3118                  * reset type). Now that the errors have been handled and cleared
3119                  * in hardware, we can safely enable interrupts. This is an
3120                  * exception to the norm.
3121                  */
3122                 hclge_enable_vector(&hdev->misc_vector, true);
3123         }
3124
3125         /* return the highest priority reset level amongst all */
3126         if (test_bit(HNAE3_IMP_RESET, addr)) {
3127                 rst_level = HNAE3_IMP_RESET;
3128                 clear_bit(HNAE3_IMP_RESET, addr);
3129                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3130                 clear_bit(HNAE3_CORE_RESET, addr);
3131                 clear_bit(HNAE3_FUNC_RESET, addr);
3132         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3133                 rst_level = HNAE3_GLOBAL_RESET;
3134                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3135                 clear_bit(HNAE3_CORE_RESET, addr);
3136                 clear_bit(HNAE3_FUNC_RESET, addr);
3137         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3138                 rst_level = HNAE3_CORE_RESET;
3139                 clear_bit(HNAE3_CORE_RESET, addr);
3140                 clear_bit(HNAE3_FUNC_RESET, addr);
3141         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3142                 rst_level = HNAE3_FUNC_RESET;
3143                 clear_bit(HNAE3_FUNC_RESET, addr);
3144         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3145                 rst_level = HNAE3_FLR_RESET;
3146                 clear_bit(HNAE3_FLR_RESET, addr);
3147         }
3148
3149         if (hdev->reset_type != HNAE3_NONE_RESET &&
3150             rst_level < hdev->reset_type)
3151                 return HNAE3_NONE_RESET;
3152
3153         return rst_level;
3154 }
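/* Illustrative example (not part of the driver): the resolution above is
 * strictly by severity, and a higher reset clears the lower pending bits it
 * already covers. For instance:
 *
 *      set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
 *      set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
 *      // hclge_get_reset_level(hdev, &hdev->reset_pending) returns
 *      // HNAE3_GLOBAL_RESET and clears both bits, since the global reset
 *      // also resets the function.
 */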
3155
3156 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3157 {
3158         u32 clearval = 0;
3159
3160         switch (hdev->reset_type) {
3161         case HNAE3_IMP_RESET:
3162                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3163                 break;
3164         case HNAE3_GLOBAL_RESET:
3165                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3166                 break;
3167         case HNAE3_CORE_RESET:
3168                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3169                 break;
3170         default:
3171                 break;
3172         }
3173
3174         if (!clearval)
3175                 return;
3176
3177         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3178         hclge_enable_vector(&hdev->misc_vector, true);
3179 }
3180
3181 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3182 {
3183         int ret = 0;
3184
3185         switch (hdev->reset_type) {
3186         case HNAE3_FUNC_RESET:
3187                 /* fall through */
3188         case HNAE3_FLR_RESET:
3189                 ret = hclge_set_all_vf_rst(hdev, true);
3190                 break;
3191         default:
3192                 break;
3193         }
3194
3195         return ret;
3196 }
3197
3198 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3199 {
3200         u32 reg_val;
3201         int ret = 0;
3202
3203         switch (hdev->reset_type) {
3204         case HNAE3_FUNC_RESET:
3205                 /* There is no mechanism for the PF to know if the VF has
3206                  * stopped IO, so for now just wait 100 ms for the VF to stop IO
3207                  */
3208                 msleep(100);
3209                 ret = hclge_func_reset_cmd(hdev, 0);
3210                 if (ret) {
3211                         dev_err(&hdev->pdev->dev,
3212                                 "asserting function reset fail %d!\n", ret);
3213                         return ret;
3214                 }
3215
3216                 /* After performing the PF reset, it is not necessary to do
3217                  * mailbox handling or send any command to firmware, because
3218                  * any mailbox handling or command to firmware is only valid
3219                  * after hclge_cmd_init is called.
3220                  */
3221                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3222                 hdev->rst_stats.pf_rst_cnt++;
3223                 break;
3224         case HNAE3_FLR_RESET:
3225                 /* There is no mechanism for the PF to know if the VF has
3226                  * stopped IO, so for now just wait 100 ms for the VF to stop IO
3227                  */
3228                 msleep(100);
3229                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3230                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3231                 hdev->rst_stats.flr_rst_cnt++;
3232                 break;
3233         case HNAE3_IMP_RESET:
3234                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3235                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3236                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3237                 break;
3238         default:
3239                 break;
3240         }
3241
3242         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3243
3244         return ret;
3245 }
3246
3247 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3248 {
3249 #define MAX_RESET_FAIL_CNT 5
3250 #define RESET_UPGRADE_DELAY_SEC 10
3251
3252         if (hdev->reset_pending) {
3253                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3254                          hdev->reset_pending);
3255                 return true;
3256         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3257                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3258                     BIT(HCLGE_IMP_RESET_BIT))) {
3259                 dev_info(&hdev->pdev->dev,
3260                          "reset failed because IMP Reset is pending\n");
3261                 hclge_clear_reset_cause(hdev);
3262                 return false;
3263         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3264                 hdev->reset_fail_cnt++;
3265                 if (is_timeout) {
3266                         set_bit(hdev->reset_type, &hdev->reset_pending);
3267                         dev_info(&hdev->pdev->dev,
3268                                  "re-schedule to wait for hw reset done\n");
3269                         return true;
3270                 }
3271
3272                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3273                 hclge_clear_reset_cause(hdev);
3274                 mod_timer(&hdev->reset_timer,
3275                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3276
3277                 return false;
3278         }
3279
3280         hclge_clear_reset_cause(hdev);
3281         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3282         return false;
3283 }
3284
3285 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3286 {
3287         int ret = 0;
3288
3289         switch (hdev->reset_type) {
3290         case HNAE3_FUNC_RESET:
3291                 /* fall through */
3292         case HNAE3_FLR_RESET:
3293                 ret = hclge_set_all_vf_rst(hdev, false);
3294                 break;
3295         default:
3296                 break;
3297         }
3298
3299         return ret;
3300 }
3301
3302 static void hclge_reset(struct hclge_dev *hdev)
3303 {
3304         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3305         bool is_timeout = false;
3306         int ret;
3307
3308         /* Initialize ae_dev reset status as well, in case enet layer wants to
3309          * know if device is undergoing reset
3310          */
3311         ae_dev->reset_type = hdev->reset_type;
3312         hdev->rst_stats.reset_cnt++;
3313         /* perform reset of the stack & ae device for a client */
3314         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3315         if (ret)
3316                 goto err_reset;
3317
3318         ret = hclge_reset_prepare_down(hdev);
3319         if (ret)
3320                 goto err_reset;
3321
3322         rtnl_lock();
3323         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3324         if (ret)
3325                 goto err_reset_lock;
3326
3327         rtnl_unlock();
3328
3329         ret = hclge_reset_prepare_wait(hdev);
3330         if (ret)
3331                 goto err_reset;
3332
3333         if (hclge_reset_wait(hdev)) {
3334                 is_timeout = true;
3335                 goto err_reset;
3336         }
3337
3338         hdev->rst_stats.hw_reset_done_cnt++;
3339
3340         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3341         if (ret)
3342                 goto err_reset;
3343
3344         rtnl_lock();
3345         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3346         if (ret)
3347                 goto err_reset_lock;
3348
3349         ret = hclge_reset_ae_dev(hdev->ae_dev);
3350         if (ret)
3351                 goto err_reset_lock;
3352
3353         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3354         if (ret)
3355                 goto err_reset_lock;
3356
3357         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3358         if (ret)
3359                 goto err_reset_lock;
3360
3361         hclge_clear_reset_cause(hdev);
3362
3363         ret = hclge_reset_prepare_up(hdev);
3364         if (ret)
3365                 goto err_reset_lock;
3366
3367         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3368         if (ret)
3369                 goto err_reset_lock;
3370
3371         rtnl_unlock();
3372
3373         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3374         if (ret)
3375                 goto err_reset;
3376
3377         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3378         if (ret)
3379                 goto err_reset;
3380
3381         hdev->last_reset_time = jiffies;
3382         hdev->reset_fail_cnt = 0;
3383         hdev->rst_stats.reset_done_cnt++;
3384         ae_dev->reset_type = HNAE3_NONE_RESET;
3385         del_timer(&hdev->reset_timer);
3386
3387         return;
3388
3389 err_reset_lock:
3390         rtnl_unlock();
3391 err_reset:
3392         if (hclge_reset_err_handle(hdev, is_timeout))
3393                 hclge_reset_task_schedule(hdev);
3394 }
3395
3396 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3397 {
3398         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3399         struct hclge_dev *hdev = ae_dev->priv;
3400
3401         /* We might end up getting called broadly because of the two cases below:
3402          * 1. A recoverable error was conveyed through APEI and the only way to
3403          *    bring back normalcy is to reset.
3404          * 2. A new reset request from the stack due to a timeout.
3405          *
3406          * For the first case, the error event might not have an ae handle
3407          * available. Check whether this is a new reset request and we are not
3408          * here just because the last reset attempt did not succeed and the
3409          * watchdog hit us again. We will know this if the last reset request
3410          * did not occur very recently (watchdog timer = 5*HZ, so check after a
3411          * sufficiently large time, say 4*5*HZ). For a new request we reset the
3412          * "reset level" to PF reset. If it is a repeat of the most recent
3413          * request, we want to throttle it, so we will not allow it again
3414          * before 3*HZ has passed.
3415          */
3416         if (!handle)
3417                 handle = &hdev->vport[0].nic;
3418
3419         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3420                 return;
3421         else if (hdev->default_reset_request)
3422                 hdev->reset_level =
3423                         hclge_get_reset_level(hdev,
3424                                               &hdev->default_reset_request);
3425         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3426                 hdev->reset_level = HNAE3_FUNC_RESET;
3427
3428         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3429                  hdev->reset_level);
3430
3431         /* request reset & schedule reset task */
3432         set_bit(hdev->reset_level, &hdev->reset_request);
3433         hclge_reset_task_schedule(hdev);
3434
3435         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3436                 hdev->reset_level++;
3437 }
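/* Worked example (illustrative), ignoring default_reset_request: for a PF
 * whose last reset finished at time T, the checks above behave as:
 *
 *      before T + 3*HZ          repeat requests are dropped (throttling)
 *      T + 3*HZ .. T + 20*HZ    reuse the current, possibly escalated,
 *                               hdev->reset_level
 *      after T + 20*HZ          treat it as a new request and start again
 *                               from HNAE3_FUNC_RESET
 *
 * A pending default_reset_request takes precedence over the 20*HZ check, and
 * after scheduling the level is bumped one step (while below global reset)
 * so that repeated failures escalate.
 */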
3438
3439 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3440                                         enum hnae3_reset_type rst_type)
3441 {
3442         struct hclge_dev *hdev = ae_dev->priv;
3443
3444         set_bit(rst_type, &hdev->default_reset_request);
3445 }
3446
3447 static void hclge_reset_timer(struct timer_list *t)
3448 {
3449         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3450
3451         dev_info(&hdev->pdev->dev,
3452                  "triggering global reset in reset timer\n");
3453         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3454         hclge_reset_event(hdev->pdev, NULL);
3455 }
3456
3457 static void hclge_reset_subtask(struct hclge_dev *hdev)
3458 {
3459         /* Check if there is any ongoing reset in the hardware. This status can
3460          * be checked from reset_pending. If there is, then we need to wait for
3461          * hardware to complete the reset.
3462          *    a. If we are able to figure out in reasonable time that hardware
3463          *       has fully reset, then we can proceed with the driver and
3464          *       client reset.
3465          *    b. Else, we can come back later to check this status, so
3466          *       re-schedule now.
3467          */
3468         hdev->last_reset_time = jiffies;
3469         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3470         if (hdev->reset_type != HNAE3_NONE_RESET)
3471                 hclge_reset(hdev);
3472
3473         /* check if we got any *new* reset requests to be honored */
3474         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3475         if (hdev->reset_type != HNAE3_NONE_RESET)
3476                 hclge_do_reset(hdev);
3477
3478         hdev->reset_type = HNAE3_NONE_RESET;
3479 }
3480
3481 static void hclge_reset_service_task(struct work_struct *work)
3482 {
3483         struct hclge_dev *hdev =
3484                 container_of(work, struct hclge_dev, rst_service_task);
3485
3486         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3487                 return;
3488
3489         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3490
3491         hclge_reset_subtask(hdev);
3492
3493         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3494 }
3495
3496 static void hclge_mailbox_service_task(struct work_struct *work)
3497 {
3498         struct hclge_dev *hdev =
3499                 container_of(work, struct hclge_dev, mbx_service_task);
3500
3501         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3502                 return;
3503
3504         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3505
3506         hclge_mbx_handler(hdev);
3507
3508         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3509 }
3510
3511 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3512 {
3513         int i;
3514
3515         /* start from vport 1, since vport 0 (the PF) is always alive */
3516         for (i = 1; i < hdev->num_alloc_vport; i++) {
3517                 struct hclge_vport *vport = &hdev->vport[i];
3518
3519                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3520                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3521
3522                 /* If vf is not alive, set to default value */
3523                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3524                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3525         }
3526 }
3527
3528 static void hclge_service_task(struct work_struct *work)
3529 {
3530         struct hclge_dev *hdev =
3531                 container_of(work, struct hclge_dev, service_task);
3532
3533         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3534                 hclge_update_stats_for_all(hdev);
3535                 hdev->hw_stats.stats_timer = 0;
3536         }
3537
3538         hclge_update_port_info(hdev);
3539         hclge_update_link_status(hdev);
3540         hclge_update_vport_alive(hdev);
3541         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3542                 hclge_rfs_filter_expire(hdev);
3543                 hdev->fd_arfs_expire_timer = 0;
3544         }
3545         hclge_service_complete(hdev);
3546 }
3547
3548 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3549 {
3550         /* VF handle has no client */
3551         if (!handle->client)
3552                 return container_of(handle, struct hclge_vport, nic);
3553         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3554                 return container_of(handle, struct hclge_vport, roce);
3555         else
3556                 return container_of(handle, struct hclge_vport, nic);
3557 }
3558
3559 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3560                             struct hnae3_vector_info *vector_info)
3561 {
3562         struct hclge_vport *vport = hclge_get_vport(handle);
3563         struct hnae3_vector_info *vector = vector_info;
3564         struct hclge_dev *hdev = vport->back;
3565         int alloc = 0;
3566         int i, j;
3567
3568         vector_num = min(hdev->num_msi_left, vector_num);
3569
3570         for (j = 0; j < vector_num; j++) {
3571                 for (i = 1; i < hdev->num_msi; i++) {
3572                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3573                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3574                                 vector->io_addr = hdev->hw.io_base +
3575                                         HCLGE_VECTOR_REG_BASE +
3576                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3577                                         vport->vport_id *
3578                                         HCLGE_VECTOR_VF_OFFSET;
3579                                 hdev->vector_status[i] = vport->vport_id;
3580                                 hdev->vector_irq[i] = vector->vector;
3581
3582                                 vector++;
3583                                 alloc++;
3584
3585                                 break;
3586                         }
3587                 }
3588         }
3589         hdev->num_msi_left -= alloc;
3590         hdev->num_msi_used += alloc;
3591
3592         return alloc;
3593 }
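/* Illustrative note (not part of the driver): MSI-X entry 0 is reserved for
 * the misc vector (see hclge_get_misc_vector()), so the scan above starts at
 * index 1. For vport 0 the i-th entry maps to:
 *
 *      vector->io_addr = hdev->hw.io_base + HCLGE_VECTOR_REG_BASE +
 *                        (i - 1) * HCLGE_VECTOR_REG_OFFSET;
 *
 * with an additional vport_id * HCLGE_VECTOR_VF_OFFSET offset for other
 * vports, exactly as computed in the loop.
 */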
3594
3595 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3596 {
3597         int i;
3598
3599         for (i = 0; i < hdev->num_msi; i++)
3600                 if (vector == hdev->vector_irq[i])
3601                         return i;
3602
3603         return -EINVAL;
3604 }
3605
3606 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3607 {
3608         struct hclge_vport *vport = hclge_get_vport(handle);
3609         struct hclge_dev *hdev = vport->back;
3610         int vector_id;
3611
3612         vector_id = hclge_get_vector_index(hdev, vector);
3613         if (vector_id < 0) {
3614                 dev_err(&hdev->pdev->dev,
3615                         "Get vector index fail. vector_id =%d\n", vector_id);
3616                 return vector_id;
3617         }
3618
3619         hclge_free_vector(hdev, vector_id);
3620
3621         return 0;
3622 }
3623
3624 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3625 {
3626         return HCLGE_RSS_KEY_SIZE;
3627 }
3628
3629 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3630 {
3631         return HCLGE_RSS_IND_TBL_SIZE;
3632 }
3633
3634 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3635                                   const u8 hfunc, const u8 *key)
3636 {
3637         struct hclge_rss_config_cmd *req;
3638         struct hclge_desc desc;
3639         int key_offset;
3640         int key_size;
3641         int ret;
3642
3643         req = (struct hclge_rss_config_cmd *)desc.data;
3644
3645         for (key_offset = 0; key_offset < 3; key_offset++) {
3646                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3647                                            false);
3648
3649                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3650                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3651
3652                 if (key_offset == 2)
3653                         key_size =
3654                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3655                 else
3656                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3657
3658                 memcpy(req->hash_key,
3659                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3660
3661                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3662                 if (ret) {
3663                         dev_err(&hdev->pdev->dev,
3664                                 "Configure RSS config fail, status = %d\n",
3665                                 ret);
3666                         return ret;
3667                 }
3668         }
3669         return 0;
3670 }
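/* Worked example (illustrative): the RSS key does not fit into a single
 * descriptor, so it is sent in three chunks of HCLGE_RSS_HASH_KEY_NUM bytes
 * with the final chunk holding the remainder. Assuming the usual sizes of a
 * 40-byte key and 16-byte chunks, the split is 16 + 16 + 8 bytes, and each
 * command encodes its chunk index via HCLGE_RSS_HASH_KEY_OFFSET_B.
 */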
3671
3672 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3673 {
3674         struct hclge_rss_indirection_table_cmd *req;
3675         struct hclge_desc desc;
3676         int i, j;
3677         int ret;
3678
3679         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3680
3681         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3682                 hclge_cmd_setup_basic_desc
3683                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3684
3685                 req->start_table_index =
3686                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3687                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3688
3689                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3690                         req->rss_result[j] =
3691                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3692
3693                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3694                 if (ret) {
3695                         dev_err(&hdev->pdev->dev,
3696                                 "Configure rss indir table fail, status = %d\n",
3697                                 ret);
3698                         return ret;
3699                 }
3700         }
3701         return 0;
3702 }
3703
3704 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3705                                  u16 *tc_size, u16 *tc_offset)
3706 {
3707         struct hclge_rss_tc_mode_cmd *req;
3708         struct hclge_desc desc;
3709         int ret;
3710         int i;
3711
3712         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3713         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3714
3715         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3716                 u16 mode = 0;
3717
3718                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3719                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3720                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3721                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3722                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3723
3724                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3725         }
3726
3727         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3728         if (ret)
3729                 dev_err(&hdev->pdev->dev,
3730                         "Configure rss tc mode fail, status = %d\n", ret);
3731
3732         return ret;
3733 }
3734
3735 static void hclge_get_rss_type(struct hclge_vport *vport)
3736 {
3737         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3738             vport->rss_tuple_sets.ipv4_udp_en ||
3739             vport->rss_tuple_sets.ipv4_sctp_en ||
3740             vport->rss_tuple_sets.ipv6_tcp_en ||
3741             vport->rss_tuple_sets.ipv6_udp_en ||
3742             vport->rss_tuple_sets.ipv6_sctp_en)
3743                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3744         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3745                  vport->rss_tuple_sets.ipv6_fragment_en)
3746                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3747         else
3748                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3749 }
3750
3751 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3752 {
3753         struct hclge_rss_input_tuple_cmd *req;
3754         struct hclge_desc desc;
3755         int ret;
3756
3757         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3758
3759         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3760
3761         /* Get the tuple cfg from pf */
3762         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3763         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3764         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3765         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3766         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3767         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3768         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3769         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3770         hclge_get_rss_type(&hdev->vport[0]);
3771         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3772         if (ret)
3773                 dev_err(&hdev->pdev->dev,
3774                         "Configure rss input fail, status = %d\n", ret);
3775         return ret;
3776 }
3777
3778 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3779                          u8 *key, u8 *hfunc)
3780 {
3781         struct hclge_vport *vport = hclge_get_vport(handle);
3782         int i;
3783
3784         /* Get hash algorithm */
3785         if (hfunc) {
3786                 switch (vport->rss_algo) {
3787                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3788                         *hfunc = ETH_RSS_HASH_TOP;
3789                         break;
3790                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3791                         *hfunc = ETH_RSS_HASH_XOR;
3792                         break;
3793                 default:
3794                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3795                         break;
3796                 }
3797         }
3798
3799         /* Get the RSS Key required by the user */
3800         if (key)
3801                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3802
3803         /* Get indirect table */
3804         if (indir)
3805                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3806                         indir[i] =  vport->rss_indirection_tbl[i];
3807
3808         return 0;
3809 }
3810
3811 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3812                          const  u8 *key, const  u8 hfunc)
3813 {
3814         struct hclge_vport *vport = hclge_get_vport(handle);
3815         struct hclge_dev *hdev = vport->back;
3816         u8 hash_algo;
3817         int ret, i;
3818
3819         /* Set the RSS Hash Key if specified by the user */
3820         if (key) {
3821                 switch (hfunc) {
3822                 case ETH_RSS_HASH_TOP:
3823                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3824                         break;
3825                 case ETH_RSS_HASH_XOR:
3826                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3827                         break;
3828                 case ETH_RSS_HASH_NO_CHANGE:
3829                         hash_algo = vport->rss_algo;
3830                         break;
3831                 default:
3832                         return -EINVAL;
3833                 }
3834
3835                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3836                 if (ret)
3837                         return ret;
3838
3839                 /* Update the shadow RSS key with the user specified key */
3840                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3841                 vport->rss_algo = hash_algo;
3842         }
3843
3844         /* Update the shadow RSS table with user specified qids */
3845         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3846                 vport->rss_indirection_tbl[i] = indir[i];
3847
3848         /* Update the hardware */
3849         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3850 }
3851
3852 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3853 {
3854         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3855
3856         if (nfc->data & RXH_L4_B_2_3)
3857                 hash_sets |= HCLGE_D_PORT_BIT;
3858         else
3859                 hash_sets &= ~HCLGE_D_PORT_BIT;
3860
3861         if (nfc->data & RXH_IP_SRC)
3862                 hash_sets |= HCLGE_S_IP_BIT;
3863         else
3864                 hash_sets &= ~HCLGE_S_IP_BIT;
3865
3866         if (nfc->data & RXH_IP_DST)
3867                 hash_sets |= HCLGE_D_IP_BIT;
3868         else
3869                 hash_sets &= ~HCLGE_D_IP_BIT;
3870
3871         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3872                 hash_sets |= HCLGE_V_TAG_BIT;
3873
3874         return hash_sets;
3875 }
3876
3877 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3878                                struct ethtool_rxnfc *nfc)
3879 {
3880         struct hclge_vport *vport = hclge_get_vport(handle);
3881         struct hclge_dev *hdev = vport->back;
3882         struct hclge_rss_input_tuple_cmd *req;
3883         struct hclge_desc desc;
3884         u8 tuple_sets;
3885         int ret;
3886
3887         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3888                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3889                 return -EINVAL;
3890
3891         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3892         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3893
3894         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3895         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3896         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3897         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3898         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3899         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3900         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3901         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3902
3903         tuple_sets = hclge_get_rss_hash_bits(nfc);
3904         switch (nfc->flow_type) {
3905         case TCP_V4_FLOW:
3906                 req->ipv4_tcp_en = tuple_sets;
3907                 break;
3908         case TCP_V6_FLOW:
3909                 req->ipv6_tcp_en = tuple_sets;
3910                 break;
3911         case UDP_V4_FLOW:
3912                 req->ipv4_udp_en = tuple_sets;
3913                 break;
3914         case UDP_V6_FLOW:
3915                 req->ipv6_udp_en = tuple_sets;
3916                 break;
3917         case SCTP_V4_FLOW:
3918                 req->ipv4_sctp_en = tuple_sets;
3919                 break;
3920         case SCTP_V6_FLOW:
3921                 if ((nfc->data & RXH_L4_B_0_1) ||
3922                     (nfc->data & RXH_L4_B_2_3))
3923                         return -EINVAL;
3924
3925                 req->ipv6_sctp_en = tuple_sets;
3926                 break;
3927         case IPV4_FLOW:
3928                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3929                 break;
3930         case IPV6_FLOW:
3931                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3932                 break;
3933         default:
3934                 return -EINVAL;
3935         }
3936
3937         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3938         if (ret) {
3939                 dev_err(&hdev->pdev->dev,
3940                         "Set rss tuple fail, status = %d\n", ret);
3941                 return ret;
3942         }
3943
3944         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3945         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3946         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3947         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3948         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3949         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3950         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3951         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3952         hclge_get_rss_type(vport);
3953         return 0;
3954 }
3955
3956 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3957                                struct ethtool_rxnfc *nfc)
3958 {
3959         struct hclge_vport *vport = hclge_get_vport(handle);
3960         u8 tuple_sets;
3961
3962         nfc->data = 0;
3963
3964         switch (nfc->flow_type) {
3965         case TCP_V4_FLOW:
3966                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3967                 break;
3968         case UDP_V4_FLOW:
3969                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3970                 break;
3971         case TCP_V6_FLOW:
3972                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3973                 break;
3974         case UDP_V6_FLOW:
3975                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3976                 break;
3977         case SCTP_V4_FLOW:
3978                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3979                 break;
3980         case SCTP_V6_FLOW:
3981                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3982                 break;
3983         case IPV4_FLOW:
3984         case IPV6_FLOW:
3985                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3986                 break;
3987         default:
3988                 return -EINVAL;
3989         }
3990
3991         if (!tuple_sets)
3992                 return 0;
3993
3994         if (tuple_sets & HCLGE_D_PORT_BIT)
3995                 nfc->data |= RXH_L4_B_2_3;
3996         if (tuple_sets & HCLGE_S_PORT_BIT)
3997                 nfc->data |= RXH_L4_B_0_1;
3998         if (tuple_sets & HCLGE_D_IP_BIT)
3999                 nfc->data |= RXH_IP_DST;
4000         if (tuple_sets & HCLGE_S_IP_BIT)
4001                 nfc->data |= RXH_IP_SRC;
4002
4003         return 0;
4004 }
4005
4006 static int hclge_get_tc_size(struct hnae3_handle *handle)
4007 {
4008         struct hclge_vport *vport = hclge_get_vport(handle);
4009         struct hclge_dev *hdev = vport->back;
4010
4011         return hdev->rss_size_max;
4012 }
4013
4014 int hclge_rss_init_hw(struct hclge_dev *hdev)
4015 {
4016         struct hclge_vport *vport = hdev->vport;
4017         u8 *rss_indir = vport[0].rss_indirection_tbl;
4018         u16 rss_size = vport[0].alloc_rss_size;
4019         u8 *key = vport[0].rss_hash_key;
4020         u8 hfunc = vport[0].rss_algo;
4021         u16 tc_offset[HCLGE_MAX_TC_NUM];
4022         u16 tc_valid[HCLGE_MAX_TC_NUM];
4023         u16 tc_size[HCLGE_MAX_TC_NUM];
4024         u16 roundup_size;
4025         int i, ret;
4026
4027         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4028         if (ret)
4029                 return ret;
4030
4031         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4032         if (ret)
4033                 return ret;
4034
4035         ret = hclge_set_rss_input_tuple(hdev);
4036         if (ret)
4037                 return ret;
4038
4039         /* Each TC has the same queue size, and the tc_size set to hardware is
4040          * the log2 of rss_size rounded up to a power of two; the actual queue
4041          * size is limited by the indirection table.
4042          */
4043         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4044                 dev_err(&hdev->pdev->dev,
4045                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4046                         rss_size);
4047                 return -EINVAL;
4048         }
4049
4050         roundup_size = roundup_pow_of_two(rss_size);
4051         roundup_size = ilog2(roundup_size);
4052
4053         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4054                 tc_valid[i] = 0;
4055
4056                 if (!(hdev->hw_tc_map & BIT(i)))
4057                         continue;
4058
4059                 tc_valid[i] = 1;
4060                 tc_size[i] = roundup_size;
4061                 tc_offset[i] = rss_size * i;
4062         }
4063
4064         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4065 }
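/* Worked example (illustrative): tc_size is the log2 of rss_size rounded up
 * to a power of two. With alloc_rss_size = 10:
 *
 *      roundup_pow_of_two(10) = 16, ilog2(16) = 4  ->  tc_size[i] = 4
 *      tc_offset[i] = 10 * i for every TC enabled in hw_tc_map
 *
 * so hardware sees 16 queue slots per TC while the indirection table keeps
 * the actual spread within the 10 allocated RSS queues.
 */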
4066
4067 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4068 {
4069         struct hclge_vport *vport = hdev->vport;
4070         int i, j;
4071
4072         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4073                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4074                         vport[j].rss_indirection_tbl[i] =
4075                                 i % vport[j].alloc_rss_size;
4076         }
4077 }
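/* Illustrative example: the default indirection table simply cycles through
 * the allocated RSS queues, so with alloc_rss_size = 8 the table reads
 * 0, 1, ..., 7, 0, 1, ... across all HCLGE_RSS_IND_TBL_SIZE entries.
 */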
4078
4079 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4080 {
4081         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4082         struct hclge_vport *vport = hdev->vport;
4083
4084         if (hdev->pdev->revision >= 0x21)
4085                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4086
4087         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4088                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4089                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4090                 vport[i].rss_tuple_sets.ipv4_udp_en =
4091                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4092                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4093                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4094                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4095                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4096                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4097                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4098                 vport[i].rss_tuple_sets.ipv6_udp_en =
4099                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4100                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4101                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4102                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4103                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4104
4105                 vport[i].rss_algo = rss_algo;
4106
4107                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4108                        HCLGE_RSS_KEY_SIZE);
4109         }
4110
4111         hclge_rss_indir_init_cfg(hdev);
4112 }
4113
4114 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4115                                 int vector_id, bool en,
4116                                 struct hnae3_ring_chain_node *ring_chain)
4117 {
4118         struct hclge_dev *hdev = vport->back;
4119         struct hnae3_ring_chain_node *node;
4120         struct hclge_desc desc;
4121         struct hclge_ctrl_vector_chain_cmd *req
4122                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4123         enum hclge_cmd_status status;
4124         enum hclge_opcode_type op;
4125         u16 tqp_type_and_id;
4126         int i;
4127
4128         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4129         hclge_cmd_setup_basic_desc(&desc, op, false);
4130         req->int_vector_id = vector_id;
4131
4132         i = 0;
4133         for (node = ring_chain; node; node = node->next) {
4134                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4135                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4136                                 HCLGE_INT_TYPE_S,
4137                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4138                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4139                                 HCLGE_TQP_ID_S, node->tqp_index);
4140                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4141                                 HCLGE_INT_GL_IDX_S,
4142                                 hnae3_get_field(node->int_gl_idx,
4143                                                 HNAE3_RING_GL_IDX_M,
4144                                                 HNAE3_RING_GL_IDX_S));
4145                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4146                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4147                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4148                         req->vfid = vport->vport_id;
4149
4150                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4151                         if (status) {
4152                                 dev_err(&hdev->pdev->dev,
4153                                         "Map TQP fail, status is %d.\n",
4154                                         status);
4155                                 return -EIO;
4156                         }
4157                         i = 0;
4158
4159                         hclge_cmd_setup_basic_desc(&desc,
4160                                                    op,
4161                                                    false);
4162                         req->int_vector_id = vector_id;
4163                 }
4164         }
4165
4166         if (i > 0) {
4167                 req->int_cause_num = i;
4168                 req->vfid = vport->vport_id;
4169                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4170                 if (status) {
4171                         dev_err(&hdev->pdev->dev,
4172                                 "Map TQP fail, status is %d.\n", status);
4173                         return -EIO;
4174                 }
4175         }
4176
4177         return 0;
4178 }
4179
4180 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4181                                     int vector,
4182                                     struct hnae3_ring_chain_node *ring_chain)
4183 {
4184         struct hclge_vport *vport = hclge_get_vport(handle);
4185         struct hclge_dev *hdev = vport->back;
4186         int vector_id;
4187
4188         vector_id = hclge_get_vector_index(hdev, vector);
4189         if (vector_id < 0) {
4190                 dev_err(&hdev->pdev->dev,
4191                         "Get vector index fail. vector_id =%d\n", vector_id);
4192                 return vector_id;
4193         }
4194
4195         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4196 }
4197
4198 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4199                                        int vector,
4200                                        struct hnae3_ring_chain_node *ring_chain)
4201 {
4202         struct hclge_vport *vport = hclge_get_vport(handle);
4203         struct hclge_dev *hdev = vport->back;
4204         int vector_id, ret;
4205
4206         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4207                 return 0;
4208
4209         vector_id = hclge_get_vector_index(hdev, vector);
4210         if (vector_id < 0) {
4211                 dev_err(&handle->pdev->dev,
4212                         "Get vector index fail. ret =%d\n", vector_id);
4213                 return vector_id;
4214         }
4215
4216         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4217         if (ret)
4218                 dev_err(&handle->pdev->dev,
4219                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4220                         vector_id,
4221                         ret);
4222
4223         return ret;
4224 }
4225
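/* Issue the HCLGE_OPC_CFG_PROMISC_MODE command to apply the unicast,
 * multicast and broadcast promiscuous settings in @param to the vport
 * identified by param->vf_id.
 */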
4226 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4227                                struct hclge_promisc_param *param)
4228 {
4229         struct hclge_promisc_cfg_cmd *req;
4230         struct hclge_desc desc;
4231         int ret;
4232
4233         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4234
4235         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4236         req->vf_id = param->vf_id;
4237
4238         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4239          * on pdev revision 0x20; newer revisions support them. Setting these
4240          * two fields does not cause an error when the driver sends the
4241          * command to the firmware on revision 0x20.
4242          */
4243         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4244                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4245
4246         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4247         if (ret)
4248                 dev_err(&hdev->pdev->dev,
4249                         "Set promisc mode fail, status is %d.\n", ret);
4250
4251         return ret;
4252 }
4253
4254 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4255                               bool en_mc, bool en_bc, int vport_id)
4256 {
4257         if (!param)
4258                 return;
4259
4260         memset(param, 0, sizeof(struct hclge_promisc_param));
4261         if (en_uc)
4262                 param->enable = HCLGE_PROMISC_EN_UC;
4263         if (en_mc)
4264                 param->enable |= HCLGE_PROMISC_EN_MC;
4265         if (en_bc)
4266                 param->enable |= HCLGE_PROMISC_EN_BC;
4267         param->vf_id = vport_id;
4268 }
4269
4270 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4271                                   bool en_mc_pmc)
4272 {
4273         struct hclge_vport *vport = hclge_get_vport(handle);
4274         struct hclge_dev *hdev = vport->back;
4275         struct hclge_promisc_param param;
4276         bool en_bc_pmc = true;
4277
4278         /* For revision 0x20, the vlan filter is always bypassed when broadcast
4279          * promisc is enabled, so broadcast promisc should stay disabled until
4280          * the user enables promisc mode.
4281          */
4282         if (handle->pdev->revision == 0x20)
4283                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4284
4285         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4286                                  vport->vport_id);
4287         return hclge_cmd_set_promisc_mode(hdev, &param);
4288 }
4289
4290 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4291 {
4292         struct hclge_get_fd_mode_cmd *req;
4293         struct hclge_desc desc;
4294         int ret;
4295
4296         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4297
4298         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4299
4300         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4301         if (ret) {
4302                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4303                 return ret;
4304         }
4305
4306         *fd_mode = req->mode;
4307
4308         return ret;
4309 }
4310
4311 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4312                                    u32 *stage1_entry_num,
4313                                    u32 *stage2_entry_num,
4314                                    u16 *stage1_counter_num,
4315                                    u16 *stage2_counter_num)
4316 {
4317         struct hclge_get_fd_allocation_cmd *req;
4318         struct hclge_desc desc;
4319         int ret;
4320
4321         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4322
4323         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4324
4325         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4326         if (ret) {
4327                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4328                         ret);
4329                 return ret;
4330         }
4331
4332         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4333         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4334         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4335         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4336
4337         return ret;
4338 }
4339
4340 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4341 {
4342         struct hclge_set_fd_key_config_cmd *req;
4343         struct hclge_fd_key_cfg *stage;
4344         struct hclge_desc desc;
4345         int ret;
4346
4347         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4348
4349         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4350         stage = &hdev->fd_cfg.key_cfg[stage_num];
4351         req->stage = stage_num;
4352         req->key_select = stage->key_sel;
4353         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4354         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4355         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4356         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4357         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4358         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4359
4360         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4361         if (ret)
4362                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4363
4364         return ret;
4365 }
4366
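/* Initialize the flow director configuration: query the FD mode from the
 * firmware, derive the key length, select the stage-1 tuple and meta data
 * fields, query the rule/counter allocation and program the stage-1 key
 * configuration.
 */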
4367 static int hclge_init_fd_config(struct hclge_dev *hdev)
4368 {
4369 #define LOW_2_WORDS             0x03
4370         struct hclge_fd_key_cfg *key_cfg;
4371         int ret;
4372
4373         if (!hnae3_dev_fd_supported(hdev))
4374                 return 0;
4375
4376         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4377         if (ret)
4378                 return ret;
4379
4380         switch (hdev->fd_cfg.fd_mode) {
4381         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4382                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4383                 break;
4384         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4385                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4386                 break;
4387         default:
4388                 dev_err(&hdev->pdev->dev,
4389                         "Unsupported flow director mode %d\n",
4390                         hdev->fd_cfg.fd_mode);
4391                 return -EOPNOTSUPP;
4392         }
4393
4394         hdev->fd_cfg.proto_support =
4395                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4396                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4397         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4398         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4399         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4400         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4401         key_cfg->outer_sipv6_word_en = 0;
4402         key_cfg->outer_dipv6_word_en = 0;
4403
4404         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4405                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4406                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4407                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4408
4409         /* If the max 400-bit key is used, tuples for the ether type flow are also supported */
4410         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4411                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4412                 key_cfg->tuple_active |=
4413                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4414         }
4415
4416         /* roce_type is used to filter RoCE frames
4417          * dst_vport is used to specify the destination vport of the rule
4418          */
4419         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4420
4421         ret = hclge_get_fd_allocation(hdev,
4422                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4423                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4424                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4425                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4426         if (ret)
4427                 return ret;
4428
4429         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4430 }
4431
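/* Write one flow director TCAM entry at index @loc for the given @stage.
 * The key, if provided, is split across three chained HCLGE_OPC_FD_TCAM_OP
 * descriptors; the entry valid bit is set only for the x half (@sel_x)
 * when @is_add is true.
 */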
4432 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4433                                 int loc, u8 *key, bool is_add)
4434 {
4435         struct hclge_fd_tcam_config_1_cmd *req1;
4436         struct hclge_fd_tcam_config_2_cmd *req2;
4437         struct hclge_fd_tcam_config_3_cmd *req3;
4438         struct hclge_desc desc[3];
4439         int ret;
4440
4441         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4442         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4443         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4444         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4445         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4446
4447         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4448         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4449         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4450
4451         req1->stage = stage;
4452         req1->xy_sel = sel_x ? 1 : 0;
4453         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4454         req1->index = cpu_to_le32(loc);
4455         req1->entry_vld = sel_x ? is_add : 0;
4456
4457         if (key) {
4458                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4459                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4460                        sizeof(req2->tcam_data));
4461                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4462                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4463         }
4464
4465         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4466         if (ret)
4467                 dev_err(&hdev->pdev->dev,
4468                         "config tcam key fail, ret=%d\n",
4469                         ret);
4470
4471         return ret;
4472 }
4473
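/* Program the action (AD) table entry at index @loc: drop the packet or
 * forward it to a direct queue, optionally bind a counter, and write the
 * rule id into the packet buffer descriptor if requested.
 */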
4474 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4475                               struct hclge_fd_ad_data *action)
4476 {
4477         struct hclge_fd_ad_config_cmd *req;
4478         struct hclge_desc desc;
4479         u64 ad_data = 0;
4480         int ret;
4481
4482         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4483
4484         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4485         req->index = cpu_to_le32(loc);
4486         req->stage = stage;
4487
4488         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4489                       action->write_rule_id_to_bd);
4490         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4491                         action->rule_id);
4492         ad_data <<= 32;
4493         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4494         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4495                       action->forward_to_direct_queue);
4496         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4497                         action->queue_id);
4498         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4499         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4500                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4501         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4502         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4503                         action->counter_id);
4504
4505         req->ad_data = cpu_to_le64(ad_data);
4506         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4507         if (ret)
4508                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4509
4510         return ret;
4511 }
4512
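/* Convert a single tuple into its TCAM x/y representation from the tuple
 * value and mask (calc_x/calc_y). Returns true if the tuple occupies key
 * space (including unused tuples, which are left as zero), false if the
 * tuple is not active in the key configuration.
 */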
4513 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4514                                    struct hclge_fd_rule *rule)
4515 {
4516         u16 tmp_x_s, tmp_y_s;
4517         u32 tmp_x_l, tmp_y_l;
4518         int i;
4519
4520         if (rule->unused_tuple & tuple_bit)
4521                 return true;
4522
4523         switch (tuple_bit) {
4524         case 0:
4525                 return false;
4526         case BIT(INNER_DST_MAC):
4527                 for (i = 0; i < 6; i++) {
4528                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4529                                rule->tuples_mask.dst_mac[i]);
4530                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4531                                rule->tuples_mask.dst_mac[i]);
4532                 }
4533
4534                 return true;
4535         case BIT(INNER_SRC_MAC):
4536                 for (i = 0; i < 6; i++) {
4537                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4538                                rule->tuples_mask.src_mac[i]);
4539                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4540                                rule->tuples_mask.src_mac[i]);
4541                 }
4542
4543                 return true;
4544         case BIT(INNER_VLAN_TAG_FST):
4545                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4546                        rule->tuples_mask.vlan_tag1);
4547                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4548                        rule->tuples_mask.vlan_tag1);
4549                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4550                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4551
4552                 return true;
4553         case BIT(INNER_ETH_TYPE):
4554                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4555                        rule->tuples_mask.ether_proto);
4556                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4557                        rule->tuples_mask.ether_proto);
4558                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4559                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4560
4561                 return true;
4562         case BIT(INNER_IP_TOS):
4563                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4564                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4565
4566                 return true;
4567         case BIT(INNER_IP_PROTO):
4568                 calc_x(*key_x, rule->tuples.ip_proto,
4569                        rule->tuples_mask.ip_proto);
4570                 calc_y(*key_y, rule->tuples.ip_proto,
4571                        rule->tuples_mask.ip_proto);
4572
4573                 return true;
4574         case BIT(INNER_SRC_IP):
4575                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4576                        rule->tuples_mask.src_ip[3]);
4577                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4578                        rule->tuples_mask.src_ip[3]);
4579                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4580                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4581
4582                 return true;
4583         case BIT(INNER_DST_IP):
4584                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4585                        rule->tuples_mask.dst_ip[3]);
4586                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4587                        rule->tuples_mask.dst_ip[3]);
4588                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4589                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4590
4591                 return true;
4592         case BIT(INNER_SRC_PORT):
4593                 calc_x(tmp_x_s, rule->tuples.src_port,
4594                        rule->tuples_mask.src_port);
4595                 calc_y(tmp_y_s, rule->tuples.src_port,
4596                        rule->tuples_mask.src_port);
4597                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4598                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4599
4600                 return true;
4601         case BIT(INNER_DST_PORT):
4602                 calc_x(tmp_x_s, rule->tuples.dst_port,
4603                        rule->tuples_mask.dst_port);
4604                 calc_y(tmp_y_s, rule->tuples.dst_port,
4605                        rule->tuples_mask.dst_port);
4606                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4607                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4608
4609                 return true;
4610         default:
4611                 return false;
4612         }
4613 }
4614
4615 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4616                                  u8 vf_id, u8 network_port_id)
4617 {
4618         u32 port_number = 0;
4619
4620         if (port_type == HOST_PORT) {
4621                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4622                                 pf_id);
4623                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4624                                 vf_id);
4625                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4626         } else {
4627                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4628                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4629                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4630         }
4631
4632         return port_number;
4633 }
4634
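/* Pack the active meta data fields (packet type and destination vport)
 * into the meta data word, then convert the packed value into its x/y
 * TCAM form, left-aligned within the 32-bit meta data region.
 */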
4635 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4636                                        __le32 *key_x, __le32 *key_y,
4637                                        struct hclge_fd_rule *rule)
4638 {
4639         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4640         u8 cur_pos = 0, tuple_size, shift_bits;
4641         int i;
4642
4643         for (i = 0; i < MAX_META_DATA; i++) {
4644                 tuple_size = meta_data_key_info[i].key_length;
4645                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4646
4647                 switch (tuple_bit) {
4648                 case BIT(ROCE_TYPE):
4649                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4650                         cur_pos += tuple_size;
4651                         break;
4652                 case BIT(DST_VPORT):
4653                         port_number = hclge_get_port_number(HOST_PORT, 0,
4654                                                             rule->vf_id, 0);
4655                         hnae3_set_field(meta_data,
4656                                         GENMASK(cur_pos + tuple_size, cur_pos),
4657                                         cur_pos, port_number);
4658                         cur_pos += tuple_size;
4659                         break;
4660                 default:
4661                         break;
4662                 }
4663         }
4664
4665         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4666         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4667         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4668
4669         *key_x = cpu_to_le32(tmp_x << shift_bits);
4670         *key_y = cpu_to_le32(tmp_y << shift_bits);
4671 }
4672
4673 /* A complete key consists of a meta data key and a tuple key.
4674  * The meta data key is stored in the MSB region and the tuple key in
4675  * the LSB region; unused bits are filled with zero.
4676  */
4677 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4678                             struct hclge_fd_rule *rule)
4679 {
4680         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4681         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4682         u8 *cur_key_x, *cur_key_y;
4683         int i, ret, tuple_size;
4684         u8 meta_data_region;
4685
4686         memset(key_x, 0, sizeof(key_x));
4687         memset(key_y, 0, sizeof(key_y));
4688         cur_key_x = key_x;
4689         cur_key_y = key_y;
4690
4691         for (i = 0; i < MAX_TUPLE; i++) {
4692                 bool tuple_valid;
4693                 u32 check_tuple;
4694
4695                 tuple_size = tuple_key_info[i].key_length / 8;
4696                 check_tuple = key_cfg->tuple_active & BIT(i);
4697
4698                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4699                                                      cur_key_y, rule);
4700                 if (tuple_valid) {
4701                         cur_key_x += tuple_size;
4702                         cur_key_y += tuple_size;
4703                 }
4704         }
4705
4706         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4707                         MAX_META_DATA_LENGTH / 8;
4708
4709         hclge_fd_convert_meta_data(key_cfg,
4710                                    (__le32 *)(key_x + meta_data_region),
4711                                    (__le32 *)(key_y + meta_data_region),
4712                                    rule);
4713
4714         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4715                                    true);
4716         if (ret) {
4717                 dev_err(&hdev->pdev->dev,
4718                         "fd key_y config fail, loc=%d, ret=%d\n",
4719                         rule->location, ret);
4720                 return ret;
4721         }
4722
4723         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4724                                    true);
4725         if (ret)
4726                 dev_err(&hdev->pdev->dev,
4727                         "fd key_x config fail, loc=%d, ret=%d\n",
4728                         rule->location, ret);
4729         return ret;
4730 }
4731
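/* Translate a flow director rule into an AD table entry: drop rules clear
 * the forwarding fields, accept rules forward to the rule's queue, and the
 * rule location is written back as the rule id.
 */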
4732 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4733                                struct hclge_fd_rule *rule)
4734 {
4735         struct hclge_fd_ad_data ad_data;
4736
4737         ad_data.ad_id = rule->location;
4738
4739         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4740                 ad_data.drop_packet = true;
4741                 ad_data.forward_to_direct_queue = false;
4742                 ad_data.queue_id = 0;
4743         } else {
4744                 ad_data.drop_packet = false;
4745                 ad_data.forward_to_direct_queue = true;
4746                 ad_data.queue_id = rule->queue_id;
4747         }
4748
4749         ad_data.use_counter = false;
4750         ad_data.counter_id = 0;
4751
4752         ad_data.use_next_stage = false;
4753         ad_data.next_input_key = 0;
4754
4755         ad_data.write_rule_id_to_bd = true;
4756         ad_data.rule_id = rule->location;
4757
4758         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4759 }
4760
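/* Validate an ethtool flow spec against the flow director capabilities and
 * record in @unused the tuple bits the spec leaves unspecified, so they can
 * be masked out of the TCAM key.
 */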
4761 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4762                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4763 {
4764         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4765         struct ethtool_usrip4_spec *usr_ip4_spec;
4766         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4767         struct ethtool_usrip6_spec *usr_ip6_spec;
4768         struct ethhdr *ether_spec;
4769
4770         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4771                 return -EINVAL;
4772
4773         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4774                 return -EOPNOTSUPP;
4775
4776         if ((fs->flow_type & FLOW_EXT) &&
4777             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4778                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4779                 return -EOPNOTSUPP;
4780         }
4781
4782         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4783         case SCTP_V4_FLOW:
4784         case TCP_V4_FLOW:
4785         case UDP_V4_FLOW:
4786                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4787                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4788
4789                 if (!tcp_ip4_spec->ip4src)
4790                         *unused |= BIT(INNER_SRC_IP);
4791
4792                 if (!tcp_ip4_spec->ip4dst)
4793                         *unused |= BIT(INNER_DST_IP);
4794
4795                 if (!tcp_ip4_spec->psrc)
4796                         *unused |= BIT(INNER_SRC_PORT);
4797
4798                 if (!tcp_ip4_spec->pdst)
4799                         *unused |= BIT(INNER_DST_PORT);
4800
4801                 if (!tcp_ip4_spec->tos)
4802                         *unused |= BIT(INNER_IP_TOS);
4803
4804                 break;
4805         case IP_USER_FLOW:
4806                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4807                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4808                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4809
4810                 if (!usr_ip4_spec->ip4src)
4811                         *unused |= BIT(INNER_SRC_IP);
4812
4813                 if (!usr_ip4_spec->ip4dst)
4814                         *unused |= BIT(INNER_DST_IP);
4815
4816                 if (!usr_ip4_spec->tos)
4817                         *unused |= BIT(INNER_IP_TOS);
4818
4819                 if (!usr_ip4_spec->proto)
4820                         *unused |= BIT(INNER_IP_PROTO);
4821
4822                 if (usr_ip4_spec->l4_4_bytes)
4823                         return -EOPNOTSUPP;
4824
4825                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4826                         return -EOPNOTSUPP;
4827
4828                 break;
4829         case SCTP_V6_FLOW:
4830         case TCP_V6_FLOW:
4831         case UDP_V6_FLOW:
4832                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4833                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4834                         BIT(INNER_IP_TOS);
4835
4836                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4837                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4838                         *unused |= BIT(INNER_SRC_IP);
4839
4840                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4841                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4842                         *unused |= BIT(INNER_DST_IP);
4843
4844                 if (!tcp_ip6_spec->psrc)
4845                         *unused |= BIT(INNER_SRC_PORT);
4846
4847                 if (!tcp_ip6_spec->pdst)
4848                         *unused |= BIT(INNER_DST_PORT);
4849
4850                 if (tcp_ip6_spec->tclass)
4851                         return -EOPNOTSUPP;
4852
4853                 break;
4854         case IPV6_USER_FLOW:
4855                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4856                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4857                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4858                         BIT(INNER_DST_PORT);
4859
4860                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4861                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4862                         *unused |= BIT(INNER_SRC_IP);
4863
4864                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4865                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4866                         *unused |= BIT(INNER_DST_IP);
4867
4868                 if (!usr_ip6_spec->l4_proto)
4869                         *unused |= BIT(INNER_IP_PROTO);
4870
4871                 if (usr_ip6_spec->tclass)
4872                         return -EOPNOTSUPP;
4873
4874                 if (usr_ip6_spec->l4_4_bytes)
4875                         return -EOPNOTSUPP;
4876
4877                 break;
4878         case ETHER_FLOW:
4879                 ether_spec = &fs->h_u.ether_spec;
4880                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4881                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4882                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4883
4884                 if (is_zero_ether_addr(ether_spec->h_source))
4885                         *unused |= BIT(INNER_SRC_MAC);
4886
4887                 if (is_zero_ether_addr(ether_spec->h_dest))
4888                         *unused |= BIT(INNER_DST_MAC);
4889
4890                 if (!ether_spec->h_proto)
4891                         *unused |= BIT(INNER_ETH_TYPE);
4892
4893                 break;
4894         default:
4895                 return -EOPNOTSUPP;
4896         }
4897
4898         if ((fs->flow_type & FLOW_EXT)) {
4899                 if (fs->h_ext.vlan_etype)
4900                         return -EOPNOTSUPP;
4901                 if (!fs->h_ext.vlan_tci)
4902                         *unused |= BIT(INNER_VLAN_TAG_FST);
4903
4904                 if (fs->m_ext.vlan_tci) {
4905                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4906                                 return -EINVAL;
4907                 }
4908         } else {
4909                 *unused |= BIT(INNER_VLAN_TAG_FST);
4910         }
4911
4912         if (fs->flow_type & FLOW_MAC_EXT) {
4913                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4914                         return -EOPNOTSUPP;
4915
4916                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4917                         *unused |= BIT(INNER_DST_MAC);
4918                 else
4919                         *unused &= ~(BIT(INNER_DST_MAC));
4920         }
4921
4922         return 0;
4923 }
4924
4925 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4926 {
4927         struct hclge_fd_rule *rule = NULL;
4928         struct hlist_node *node2;
4929
4930         spin_lock_bh(&hdev->fd_rule_lock);
4931         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4932                 if (rule->location >= location)
4933                         break;
4934         }
4935
4936         spin_unlock_bh(&hdev->fd_rule_lock);
4937
4938         return rule && rule->location == location;
4939 }
4940
4941 /* must be called with fd_rule_lock held */
4942 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4943                                      struct hclge_fd_rule *new_rule,
4944                                      u16 location,
4945                                      bool is_add)
4946 {
4947         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4948         struct hlist_node *node2;
4949
4950         if (is_add && !new_rule)
4951                 return -EINVAL;
4952
4953         hlist_for_each_entry_safe(rule, node2,
4954                                   &hdev->fd_rule_list, rule_node) {
4955                 if (rule->location >= location)
4956                         break;
4957                 parent = rule;
4958         }
4959
4960         if (rule && rule->location == location) {
4961                 hlist_del(&rule->rule_node);
4962                 kfree(rule);
4963                 hdev->hclge_fd_rule_num--;
4964
4965                 if (!is_add) {
4966                         if (!hdev->hclge_fd_rule_num)
4967                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4968                         clear_bit(location, hdev->fd_bmap);
4969
4970                         return 0;
4971                 }
4972         } else if (!is_add) {
4973                 dev_err(&hdev->pdev->dev,
4974                         "delete fail, rule %d is inexistent\n",
4975                         location);
4976                 return -EINVAL;
4977         }
4978
4979         INIT_HLIST_NODE(&new_rule->rule_node);
4980
4981         if (parent)
4982                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4983         else
4984                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4985
4986         set_bit(location, hdev->fd_bmap);
4987         hdev->hclge_fd_rule_num++;
4988         hdev->fd_active_type = new_rule->rule_type;
4989
4990         return 0;
4991 }
4992
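/* Copy the tuple values and masks from the ethtool flow spec into @rule,
 * filling in the implied fields (ether type for IPv4/IPv6 flows, L4
 * protocol for TCP/UDP/SCTP flows) and the optional VLAN and MAC extras.
 */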
4993 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4994                               struct ethtool_rx_flow_spec *fs,
4995                               struct hclge_fd_rule *rule)
4996 {
4997         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4998
4999         switch (flow_type) {
5000         case SCTP_V4_FLOW:
5001         case TCP_V4_FLOW:
5002         case UDP_V4_FLOW:
5003                 rule->tuples.src_ip[3] =
5004                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5005                 rule->tuples_mask.src_ip[3] =
5006                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5007
5008                 rule->tuples.dst_ip[3] =
5009                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5010                 rule->tuples_mask.dst_ip[3] =
5011                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5012
5013                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5014                 rule->tuples_mask.src_port =
5015                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5016
5017                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5018                 rule->tuples_mask.dst_port =
5019                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5020
5021                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5022                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5023
5024                 rule->tuples.ether_proto = ETH_P_IP;
5025                 rule->tuples_mask.ether_proto = 0xFFFF;
5026
5027                 break;
5028         case IP_USER_FLOW:
5029                 rule->tuples.src_ip[3] =
5030                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5031                 rule->tuples_mask.src_ip[3] =
5032                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5033
5034                 rule->tuples.dst_ip[3] =
5035                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5036                 rule->tuples_mask.dst_ip[3] =
5037                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5038
5039                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5040                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5041
5042                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5043                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5044
5045                 rule->tuples.ether_proto = ETH_P_IP;
5046                 rule->tuples_mask.ether_proto = 0xFFFF;
5047
5048                 break;
5049         case SCTP_V6_FLOW:
5050         case TCP_V6_FLOW:
5051         case UDP_V6_FLOW:
5052                 be32_to_cpu_array(rule->tuples.src_ip,
5053                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5054                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5055                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5056
5057                 be32_to_cpu_array(rule->tuples.dst_ip,
5058                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5059                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5060                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5061
5062                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5063                 rule->tuples_mask.src_port =
5064                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5065
5066                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5067                 rule->tuples_mask.dst_port =
5068                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5069
5070                 rule->tuples.ether_proto = ETH_P_IPV6;
5071                 rule->tuples_mask.ether_proto = 0xFFFF;
5072
5073                 break;
5074         case IPV6_USER_FLOW:
5075                 be32_to_cpu_array(rule->tuples.src_ip,
5076                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5077                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5078                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5079
5080                 be32_to_cpu_array(rule->tuples.dst_ip,
5081                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5082                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5083                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5084
5085                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5086                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5087
5088                 rule->tuples.ether_proto = ETH_P_IPV6;
5089                 rule->tuples_mask.ether_proto = 0xFFFF;
5090
5091                 break;
5092         case ETHER_FLOW:
5093                 ether_addr_copy(rule->tuples.src_mac,
5094                                 fs->h_u.ether_spec.h_source);
5095                 ether_addr_copy(rule->tuples_mask.src_mac,
5096                                 fs->m_u.ether_spec.h_source);
5097
5098                 ether_addr_copy(rule->tuples.dst_mac,
5099                                 fs->h_u.ether_spec.h_dest);
5100                 ether_addr_copy(rule->tuples_mask.dst_mac,
5101                                 fs->m_u.ether_spec.h_dest);
5102
5103                 rule->tuples.ether_proto =
5104                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5105                 rule->tuples_mask.ether_proto =
5106                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5107
5108                 break;
5109         default:
5110                 return -EOPNOTSUPP;
5111         }
5112
5113         switch (flow_type) {
5114         case SCTP_V4_FLOW:
5115         case SCTP_V6_FLOW:
5116                 rule->tuples.ip_proto = IPPROTO_SCTP;
5117                 rule->tuples_mask.ip_proto = 0xFF;
5118                 break;
5119         case TCP_V4_FLOW:
5120         case TCP_V6_FLOW:
5121                 rule->tuples.ip_proto = IPPROTO_TCP;
5122                 rule->tuples_mask.ip_proto = 0xFF;
5123                 break;
5124         case UDP_V4_FLOW:
5125         case UDP_V6_FLOW:
5126                 rule->tuples.ip_proto = IPPROTO_UDP;
5127                 rule->tuples_mask.ip_proto = 0xFF;
5128                 break;
5129         default:
5130                 break;
5131         }
5132
5133         if ((fs->flow_type & FLOW_EXT)) {
5134                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5135                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5136         }
5137
5138         if (fs->flow_type & FLOW_MAC_EXT) {
5139                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5140                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5141         }
5142
5143         return 0;
5144 }
5145
5146 /* must be called with fd_rule_lock held */
5147 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5148                                 struct hclge_fd_rule *rule)
5149 {
5150         int ret;
5151
5152         if (!rule) {
5153                 dev_err(&hdev->pdev->dev,
5154                         "The flow director rule is NULL\n");
5155                 return -EINVAL;
5156         }
5157
5158         /* it never fails here, so there is no need to check the return value */
5159         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5160
5161         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5162         if (ret)
5163                 goto clear_rule;
5164
5165         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5166         if (ret)
5167                 goto clear_rule;
5168
5169         return 0;
5170
5171 clear_rule:
5172         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5173         return ret;
5174 }
5175
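/* Add a flow director rule from an ethtool request: validate the spec,
 * resolve the destination vport and queue, build the rule, and program the
 * key and action tables under fd_rule_lock.
 */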
5176 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5177                               struct ethtool_rxnfc *cmd)
5178 {
5179         struct hclge_vport *vport = hclge_get_vport(handle);
5180         struct hclge_dev *hdev = vport->back;
5181         u16 dst_vport_id = 0, q_index = 0;
5182         struct ethtool_rx_flow_spec *fs;
5183         struct hclge_fd_rule *rule;
5184         u32 unused = 0;
5185         u8 action;
5186         int ret;
5187
5188         if (!hnae3_dev_fd_supported(hdev))
5189                 return -EOPNOTSUPP;
5190
5191         if (!hdev->fd_en) {
5192                 dev_warn(&hdev->pdev->dev,
5193                          "Please enable flow director first\n");
5194                 return -EOPNOTSUPP;
5195         }
5196
5197         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5198
5199         ret = hclge_fd_check_spec(hdev, fs, &unused);
5200         if (ret) {
5201                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5202                 return ret;
5203         }
5204
5205         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5206                 action = HCLGE_FD_ACTION_DROP_PACKET;
5207         } else {
5208                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5209                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5210                 u16 tqps;
5211
5212                 if (vf > hdev->num_req_vfs) {
5213                         dev_err(&hdev->pdev->dev,
5214                                 "Error: vf id (%d) > max vf num (%d)\n",
5215                                 vf, hdev->num_req_vfs);
5216                         return -EINVAL;
5217                 }
5218
5219                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5220                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5221
5222                 if (ring >= tqps) {
5223                         dev_err(&hdev->pdev->dev,
5224                                 "Error: queue id (%d) > max tqp num (%d)\n",
5225                                 ring, tqps - 1);
5226                         return -EINVAL;
5227                 }
5228
5229                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5230                 q_index = ring;
5231         }
5232
5233         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5234         if (!rule)
5235                 return -ENOMEM;
5236
5237         ret = hclge_fd_get_tuple(hdev, fs, rule);
5238         if (ret) {
5239                 kfree(rule);
5240                 return ret;
5241         }
5242
5243         rule->flow_type = fs->flow_type;
5244
5245         rule->location = fs->location;
5246         rule->unused_tuple = unused;
5247         rule->vf_id = dst_vport_id;
5248         rule->queue_id = q_index;
5249         rule->action = action;
5250         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5251
5252         /* To avoid rule conflicts, clear all aRFS rules when the user
5253          * configures a rule through ethtool.
5254          */
5255         hclge_clear_arfs_rules(handle);
5256
5257         spin_lock_bh(&hdev->fd_rule_lock);
5258         ret = hclge_fd_config_rule(hdev, rule);
5259
5260         spin_unlock_bh(&hdev->fd_rule_lock);
5261
5262         return ret;
5263 }
5264
5265 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5266                               struct ethtool_rxnfc *cmd)
5267 {
5268         struct hclge_vport *vport = hclge_get_vport(handle);
5269         struct hclge_dev *hdev = vport->back;
5270         struct ethtool_rx_flow_spec *fs;
5271         int ret;
5272
5273         if (!hnae3_dev_fd_supported(hdev))
5274                 return -EOPNOTSUPP;
5275
5276         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5277
5278         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5279                 return -EINVAL;
5280
5281         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5282                 dev_err(&hdev->pdev->dev,
5283                         "Delete fail, rule %d is inexistent\n",
5284                         fs->location);
5285                 return -ENOENT;
5286         }
5287
5288         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5289                                    fs->location, NULL, false);
5290         if (ret)
5291                 return ret;
5292
5293         spin_lock_bh(&hdev->fd_rule_lock);
5294         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5295
5296         spin_unlock_bh(&hdev->fd_rule_lock);
5297
5298         return ret;
5299 }
5300
5301 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5302                                      bool clear_list)
5303 {
5304         struct hclge_vport *vport = hclge_get_vport(handle);
5305         struct hclge_dev *hdev = vport->back;
5306         struct hclge_fd_rule *rule;
5307         struct hlist_node *node;
5308         u16 location;
5309
5310         if (!hnae3_dev_fd_supported(hdev))
5311                 return;
5312
5313         spin_lock_bh(&hdev->fd_rule_lock);
5314         for_each_set_bit(location, hdev->fd_bmap,
5315                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5316                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5317                                      NULL, false);
5318
5319         if (clear_list) {
5320                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5321                                           rule_node) {
5322                         hlist_del(&rule->rule_node);
5323                         kfree(rule);
5324                 }
5325                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5326                 hdev->hclge_fd_rule_num = 0;
5327                 bitmap_zero(hdev->fd_bmap,
5328                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5329         }
5330
5331         spin_unlock_bh(&hdev->fd_rule_lock);
5332 }
5333
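/* Re-program all flow director rules from the software rule list after a
 * reset; rules that fail to restore are removed from the list.
 */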
5334 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5335 {
5336         struct hclge_vport *vport = hclge_get_vport(handle);
5337         struct hclge_dev *hdev = vport->back;
5338         struct hclge_fd_rule *rule;
5339         struct hlist_node *node;
5340         int ret;
5341
5342         /* Return 0 here, because the reset error handling checks this
5343          * return value; returning an error here would make the reset
5344          * process fail.
5345          */
5346         if (!hnae3_dev_fd_supported(hdev))
5347                 return 0;
5348
5349         /* if fd is disabled, the rules should not be restored during reset */
5350         if (!hdev->fd_en)
5351                 return 0;
5352
5353         spin_lock_bh(&hdev->fd_rule_lock);
5354         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5355                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5356                 if (!ret)
5357                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5358
5359                 if (ret) {
5360                         dev_warn(&hdev->pdev->dev,
5361                                  "Restore rule %d failed, remove it\n",
5362                                  rule->location);
5363                         clear_bit(rule->location, hdev->fd_bmap);
5364                         hlist_del(&rule->rule_node);
5365                         kfree(rule);
5366                         hdev->hclge_fd_rule_num--;
5367                 }
5368         }
5369
5370         if (hdev->hclge_fd_rule_num)
5371                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5372
5373         spin_unlock_bh(&hdev->fd_rule_lock);
5374
5375         return 0;
5376 }
5377
5378 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5379                                  struct ethtool_rxnfc *cmd)
5380 {
5381         struct hclge_vport *vport = hclge_get_vport(handle);
5382         struct hclge_dev *hdev = vport->back;
5383
5384         if (!hnae3_dev_fd_supported(hdev))
5385                 return -EOPNOTSUPP;
5386
5387         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5388         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5389
5390         return 0;
5391 }
5392
5393 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5394                                   struct ethtool_rxnfc *cmd)
5395 {
5396         struct hclge_vport *vport = hclge_get_vport(handle);
5397         struct hclge_fd_rule *rule = NULL;
5398         struct hclge_dev *hdev = vport->back;
5399         struct ethtool_rx_flow_spec *fs;
5400         struct hlist_node *node2;
5401
5402         if (!hnae3_dev_fd_supported(hdev))
5403                 return -EOPNOTSUPP;
5404
5405         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5406
5407         spin_lock_bh(&hdev->fd_rule_lock);
5408
5409         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5410                 if (rule->location >= fs->location)
5411                         break;
5412         }
5413
5414         if (!rule || fs->location != rule->location) {
5415                 spin_unlock_bh(&hdev->fd_rule_lock);
5416
5417                 return -ENOENT;
5418         }
5419
5420         fs->flow_type = rule->flow_type;
5421         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5422         case SCTP_V4_FLOW:
5423         case TCP_V4_FLOW:
5424         case UDP_V4_FLOW:
5425                 fs->h_u.tcp_ip4_spec.ip4src =
5426                                 cpu_to_be32(rule->tuples.src_ip[3]);
5427                 fs->m_u.tcp_ip4_spec.ip4src =
5428                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5429                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5430
5431                 fs->h_u.tcp_ip4_spec.ip4dst =
5432                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5433                 fs->m_u.tcp_ip4_spec.ip4dst =
5434                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5435                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5436
5437                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5438                 fs->m_u.tcp_ip4_spec.psrc =
5439                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5440                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5441
5442                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5443                 fs->m_u.tcp_ip4_spec.pdst =
5444                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5445                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5446
5447                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5448                 fs->m_u.tcp_ip4_spec.tos =
5449                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5450                                 0 : rule->tuples_mask.ip_tos;
5451
5452                 break;
5453         case IP_USER_FLOW:
5454                 fs->h_u.usr_ip4_spec.ip4src =
5455                                 cpu_to_be32(rule->tuples.src_ip[3]);
5456                 fs->m_u.usr_ip4_spec.ip4src =
5457                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5458                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5459
5460                 fs->h_u.usr_ip4_spec.ip4dst =
5461                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5462                 fs->m_u.usr_ip4_spec.ip4dst =
5463                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5464                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5465
5466                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5467                 fs->m_u.usr_ip4_spec.tos =
5468                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5469                                 0 : rule->tuples_mask.ip_tos;
5470
5471                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5472                 fs->m_u.usr_ip4_spec.proto =
5473                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5474                                 0 : rule->tuples_mask.ip_proto;
5475
5476                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5477
5478                 break;
5479         case SCTP_V6_FLOW:
5480         case TCP_V6_FLOW:
5481         case UDP_V6_FLOW:
5482                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5483                                   rule->tuples.src_ip, 4);
5484                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5485                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5486                 else
5487                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5488                                           rule->tuples_mask.src_ip, 4);
5489
5490                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5491                                   rule->tuples.dst_ip, 4);
5492                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5493                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5494                 else
5495                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5496                                           rule->tuples_mask.dst_ip, 4);
5497
5498                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5499                 fs->m_u.tcp_ip6_spec.psrc =
5500                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5501                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5502
5503                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5504                 fs->m_u.tcp_ip6_spec.pdst =
5505                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5506                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5507
5508                 break;
5509         case IPV6_USER_FLOW:
5510                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5511                                   rule->tuples.src_ip, 4);
5512                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5513                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5514                 else
5515                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5516                                           rule->tuples_mask.src_ip, 4);
5517
5518                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5519                                   rule->tuples.dst_ip, 4);
5520                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5521                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5522                 else
5523                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5524                                           rule->tuples_mask.dst_ip, 4);
5525
5526                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5527                 fs->m_u.usr_ip6_spec.l4_proto =
5528                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5529                                 0 : rule->tuples_mask.ip_proto;
5530
5531                 break;
5532         case ETHER_FLOW:
5533                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5534                                 rule->tuples.src_mac);
5535                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5536                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5537                 else
5538                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5539                                         rule->tuples_mask.src_mac);
5540
5541                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5542                                 rule->tuples.dst_mac);
5543                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5544                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5545                 else
5546                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5547                                         rule->tuples_mask.dst_mac);
5548
5549                 fs->h_u.ether_spec.h_proto =
5550                                 cpu_to_be16(rule->tuples.ether_proto);
5551                 fs->m_u.ether_spec.h_proto =
5552                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5553                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5554
5555                 break;
5556         default:
5557                 spin_unlock_bh(&hdev->fd_rule_lock);
5558                 return -EOPNOTSUPP;
5559         }
5560
5561         if (fs->flow_type & FLOW_EXT) {
5562                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5563                 fs->m_ext.vlan_tci =
5564                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5565                                 cpu_to_be16(VLAN_VID_MASK) :
5566                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5567         }
5568
5569         if (fs->flow_type & FLOW_MAC_EXT) {
5570                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5571                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5572                         eth_zero_addr(fs->m_ext.h_dest);
5573                 else
5574                         ether_addr_copy(fs->m_ext.h_dest,
5575                                         rule->tuples_mask.dst_mac);
5576         }
5577
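             /* Encode the action in ring_cookie as ethtool expects:
              * RX_CLS_FLOW_DISC for a drop rule, otherwise the destination
              * queue id with the VF id packed into the upper bits
              * (ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF).
              */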
5578         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5579                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5580         } else {
5581                 u64 vf_id;
5582
5583                 fs->ring_cookie = rule->queue_id;
5584                 vf_id = rule->vf_id;
5585                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5586                 fs->ring_cookie |= vf_id;
5587         }
5588
5589         spin_unlock_bh(&hdev->fd_rule_lock);
5590
5591         return 0;
5592 }
5593
5594 static int hclge_get_all_rules(struct hnae3_handle *handle,
5595                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5596 {
5597         struct hclge_vport *vport = hclge_get_vport(handle);
5598         struct hclge_dev *hdev = vport->back;
5599         struct hclge_fd_rule *rule;
5600         struct hlist_node *node2;
5601         int cnt = 0;
5602
5603         if (!hnae3_dev_fd_supported(hdev))
5604                 return -EOPNOTSUPP;
5605
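             /* for ETHTOOL_GRXCLSRLALL, cmd->data reports the total rule
              * capacity of stage 1 of the flow director table
              */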
5606         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5607
5608         spin_lock_bh(&hdev->fd_rule_lock);
5609         hlist_for_each_entry_safe(rule, node2,
5610                                   &hdev->fd_rule_list, rule_node) {
5611                 if (cnt == cmd->rule_cnt) {
5612                         spin_unlock_bh(&hdev->fd_rule_lock);
5613                         return -EMSGSIZE;
5614                 }
5615
5616                 rule_locs[cnt] = rule->location;
5617                 cnt++;
5618         }
5619
5620         spin_unlock_bh(&hdev->fd_rule_lock);
5621
5622         cmd->rule_cnt = cnt;
5623
5624         return 0;
5625 }
5626
5627 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5628                                      struct hclge_fd_rule_tuples *tuples)
5629 {
5630         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5631         tuples->ip_proto = fkeys->basic.ip_proto;
5632         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5633
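             /* IPv4 addresses occupy only the last u32 of the 4-word address
              * arrays; IPv6 addresses fill all four words.
              */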
5634         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5635                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5636                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5637         } else {
5638                 memcpy(tuples->src_ip,
5639                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5640                        sizeof(tuples->src_ip));
5641                 memcpy(tuples->dst_ip,
5642                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5643                        sizeof(tuples->dst_ip));
5644         }
5645 }
5646
5647 /* traverse all rules, check whether an existing rule has the same tuples */
5648 static struct hclge_fd_rule *
5649 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5650                           const struct hclge_fd_rule_tuples *tuples)
5651 {
5652         struct hclge_fd_rule *rule = NULL;
5653         struct hlist_node *node;
5654
5655         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5656                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5657                         return rule;
5658         }
5659
5660         return NULL;
5661 }
5662
5663 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5664                                      struct hclge_fd_rule *rule)
5665 {
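             /* aRFS rules match on ether proto, L4 proto, src/dst IP and dst
              * port; MAC addresses, VLAN tag, IP TOS and the source port are
              * masked out.
              */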
5666         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5667                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5668                              BIT(INNER_SRC_PORT);
5669         rule->action = 0;
5670         rule->vf_id = 0;
5671         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5672         if (tuples->ether_proto == ETH_P_IP) {
5673                 if (tuples->ip_proto == IPPROTO_TCP)
5674                         rule->flow_type = TCP_V4_FLOW;
5675                 else
5676                         rule->flow_type = UDP_V4_FLOW;
5677         } else {
5678                 if (tuples->ip_proto == IPPROTO_TCP)
5679                         rule->flow_type = TCP_V6_FLOW;
5680                 else
5681                         rule->flow_type = UDP_V6_FLOW;
5682         }
5683         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5684         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5685 }
5686
5687 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5688                                       u16 flow_id, struct flow_keys *fkeys)
5689 {
5690         struct hclge_vport *vport = hclge_get_vport(handle);
5691         struct hclge_fd_rule_tuples new_tuples;
5692         struct hclge_dev *hdev = vport->back;
5693         struct hclge_fd_rule *rule;
5694         u16 tmp_queue_id;
5695         u16 bit_id;
5696         int ret;
5697
5698         if (!hnae3_dev_fd_supported(hdev))
5699                 return -EOPNOTSUPP;
5700
5701         memset(&new_tuples, 0, sizeof(new_tuples));
5702         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5703
5704         spin_lock_bh(&hdev->fd_rule_lock);
5705
5706         /* when a flow director rule has already been added by the user,
5707          * arfs should not work
5708          */
5709         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5710                 spin_unlock_bh(&hdev->fd_rule_lock);
5711
5712                 return -EOPNOTSUPP;
5713         }
5714
5715         /* check whether a flow director filter already exists for this flow:
5716          * if not, create a new filter for it;
5717          * if a filter exists with a different queue id, modify the filter;
5718          * if a filter exists with the same queue id, do nothing
5719          */
5720         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5721         if (!rule) {
5722                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5723                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5724                         spin_unlock_bh(&hdev->fd_rule_lock);
5725
5726                         return -ENOSPC;
5727                 }
5728
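                     /* fd_rule_lock is held here, so this allocation must not sleep */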
5729                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
5730                 if (!rule) {
5731                         spin_unlock_bh(&hdev->fd_rule_lock);
5732
5733                         return -ENOMEM;
5734                 }
5735
5736                 set_bit(bit_id, hdev->fd_bmap);
5737                 rule->location = bit_id;
5738                 rule->flow_id = flow_id;
5739                 rule->queue_id = queue_id;
5740                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5741                 ret = hclge_fd_config_rule(hdev, rule);
5742
5743                 spin_unlock_bh(&hdev->fd_rule_lock);
5744
5745                 if (ret)
5746                         return ret;
5747
5748                 return rule->location;
5749         }
5750
5751         spin_unlock_bh(&hdev->fd_rule_lock);
5752
5753         if (rule->queue_id == queue_id)
5754                 return rule->location;
5755
5756         tmp_queue_id = rule->queue_id;
5757         rule->queue_id = queue_id;
5758         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5759         if (ret) {
5760                 rule->queue_id = tmp_queue_id;
5761                 return ret;
5762         }
5763
5764         return rule->location;
5765 }
5766
5767 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5768 {
5769 #ifdef CONFIG_RFS_ACCEL
5770         struct hnae3_handle *handle = &hdev->vport[0].nic;
5771         struct hclge_fd_rule *rule;
5772         struct hlist_node *node;
5773         HLIST_HEAD(del_list);
5774
5775         spin_lock_bh(&hdev->fd_rule_lock);
5776         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5777                 spin_unlock_bh(&hdev->fd_rule_lock);
5778                 return;
5779         }
5780         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5781                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5782                                         rule->flow_id, rule->location)) {
5783                         hlist_del_init(&rule->rule_node);
5784                         hlist_add_head(&rule->rule_node, &del_list);
5785                         hdev->hclge_fd_rule_num--;
5786                         clear_bit(rule->location, hdev->fd_bmap);
5787                 }
5788         }
5789         spin_unlock_bh(&hdev->fd_rule_lock);
5790
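             /* with fd_rule_lock released, remove the expired rules from the
              * hardware TCAM and free them
              */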
5791         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5792                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5793                                      rule->location, NULL, false);
5794                 kfree(rule);
5795         }
5796 #endif
5797 }
5798
5799 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5800 {
5801 #ifdef CONFIG_RFS_ACCEL
5802         struct hclge_vport *vport = hclge_get_vport(handle);
5803         struct hclge_dev *hdev = vport->back;
5804
5805         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5806                 hclge_del_all_fd_entries(handle, true);
5807 #endif
5808 }
5809
5810 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5811 {
5812         struct hclge_vport *vport = hclge_get_vport(handle);
5813         struct hclge_dev *hdev = vport->back;
5814
5815         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5816                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5817 }
5818
5819 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5820 {
5821         struct hclge_vport *vport = hclge_get_vport(handle);
5822         struct hclge_dev *hdev = vport->back;
5823
5824         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5825 }
5826
5827 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5828 {
5829         struct hclge_vport *vport = hclge_get_vport(handle);
5830         struct hclge_dev *hdev = vport->back;
5831
5832         return hdev->rst_stats.hw_reset_done_cnt;
5833 }
5834
5835 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5836 {
5837         struct hclge_vport *vport = hclge_get_vport(handle);
5838         struct hclge_dev *hdev = vport->back;
5839         bool clear;
5840
5841         hdev->fd_en = enable;
5842         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5843         if (!enable)
5844                 hclge_del_all_fd_entries(handle, clear);
5845         else
5846                 hclge_restore_fd_entries(handle);
5847 }
5848
5849 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5850 {
5851         struct hclge_desc desc;
5852         struct hclge_config_mac_mode_cmd *req =
5853                 (struct hclge_config_mac_mode_cmd *)desc.data;
5854         u32 loop_en = 0;
5855         int ret;
5856
5857         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
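             /* loop_en gathers the MAC TX/RX enables together with padding,
              * FCS and oversize/undersize handling bits, all switched as one
              * group; the 1588 and loopback bits are forced to 0 here.
              */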
5858         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5859         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5860         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5861         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5862         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5863         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5864         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5867         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5868         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5869         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5870         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5871         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5872         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5873
5874         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5875         if (ret)
5876                 dev_err(&hdev->pdev->dev,
5877                         "mac enable fail, ret =%d.\n", ret);
5878 }
5879
5880 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5881 {
5882         struct hclge_config_mac_mode_cmd *req;
5883         struct hclge_desc desc;
5884         u32 loop_en;
5885         int ret;
5886
5887         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5888         /* 1 Read out the MAC mode config at first */
5889         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5890         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5891         if (ret) {
5892                 dev_err(&hdev->pdev->dev,
5893                         "mac loopback get fail, ret =%d.\n", ret);
5894                 return ret;
5895         }
5896
5897         /* 2 Then setup the loopback flag */
5898         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5899         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5900         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5901         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5902
5903         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5904
5905         /* 3 Config mac work mode with loopback flag
5906          * and its original configure parameters
5907          */
5908         hclge_cmd_reuse_desc(&desc, false);
5909         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5910         if (ret)
5911                 dev_err(&hdev->pdev->dev,
5912                         "mac loopback set fail, ret =%d.\n", ret);
5913         return ret;
5914 }
5915
5916 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5917                                      enum hnae3_loop loop_mode)
5918 {
5919 #define HCLGE_SERDES_RETRY_MS   10
5920 #define HCLGE_SERDES_RETRY_NUM  100
5921
5922 #define HCLGE_MAC_LINK_STATUS_MS   10
5923 #define HCLGE_MAC_LINK_STATUS_NUM  100
5924 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5925 #define HCLGE_MAC_LINK_STATUS_UP   1
5926
5927         struct hclge_serdes_lb_cmd *req;
5928         struct hclge_desc desc;
5929         int mac_link_ret = 0;
5930         int ret, i = 0;
5931         u8 loop_mode_b;
5932
5933         req = (struct hclge_serdes_lb_cmd *)desc.data;
5934         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5935
5936         switch (loop_mode) {
5937         case HNAE3_LOOP_SERIAL_SERDES:
5938                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5939                 break;
5940         case HNAE3_LOOP_PARALLEL_SERDES:
5941                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5942                 break;
5943         default:
5944                 dev_err(&hdev->pdev->dev,
5945                         "unsupported serdes loopback mode %d\n", loop_mode);
5946                 return -ENOTSUPP;
5947         }
5948
5949         if (en) {
5950                 req->enable = loop_mode_b;
5951                 req->mask = loop_mode_b;
5952                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5953         } else {
5954                 req->mask = loop_mode_b;
5955                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5956         }
5957
5958         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5959         if (ret) {
5960                 dev_err(&hdev->pdev->dev,
5961                         "serdes loopback set fail, ret = %d\n", ret);
5962                 return ret;
5963         }
5964
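             /* poll until the firmware reports HCLGE_CMD_SERDES_DONE_B, up to
              * HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS (about 1 second)
              */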
5965         do {
5966                 msleep(HCLGE_SERDES_RETRY_MS);
5967                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5968                                            true);
5969                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5970                 if (ret) {
5971                         dev_err(&hdev->pdev->dev,
5972                                 "serdes loopback get, ret = %d\n", ret);
5973                         return ret;
5974                 }
5975         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5976                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5977
5978         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5979                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5980                 return -EBUSY;
5981         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5982                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5983                 return -EIO;
5984         }
5985
5986         hclge_cfg_mac_mode(hdev, en);
5987
5988         i = 0;
5989         do {
5990                 /* serdes internal loopback, independent of the network cable. */
5991                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5992                 ret = hclge_get_mac_link_status(hdev);
5993                 if (ret == mac_link_ret)
5994                         return 0;
5995         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5996
5997         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5998
5999         return -EBUSY;
6000 }
6001
6002 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6003                             int stream_id, bool enable)
6004 {
6005         struct hclge_desc desc;
6006         struct hclge_cfg_com_tqp_queue_cmd *req =
6007                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6008         int ret;
6009
6010         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6011         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6012         req->stream_id = cpu_to_le16(stream_id);
6013         req->enable |= enable << HCLGE_TQP_ENABLE_B;
6014
6015         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6016         if (ret)
6017                 dev_err(&hdev->pdev->dev,
6018                         "Tqp enable fail, status =%d.\n", ret);
6019         return ret;
6020 }
6021
6022 static int hclge_set_loopback(struct hnae3_handle *handle,
6023                               enum hnae3_loop loop_mode, bool en)
6024 {
6025         struct hclge_vport *vport = hclge_get_vport(handle);
6026         struct hnae3_knic_private_info *kinfo;
6027         struct hclge_dev *hdev = vport->back;
6028         int i, ret;
6029
6030         switch (loop_mode) {
6031         case HNAE3_LOOP_APP:
6032                 ret = hclge_set_app_loopback(hdev, en);
6033                 break;
6034         case HNAE3_LOOP_SERIAL_SERDES:
6035         case HNAE3_LOOP_PARALLEL_SERDES:
6036                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6037                 break;
6038         default:
6039                 ret = -ENOTSUPP;
6040                 dev_err(&hdev->pdev->dev,
6041                         "loop_mode %d is not supported\n", loop_mode);
6042                 break;
6043         }
6044
6045         if (ret)
6046                 return ret;
6047
6048         kinfo = &vport->nic.kinfo;
6049         for (i = 0; i < kinfo->num_tqps; i++) {
6050                 ret = hclge_tqp_enable(hdev, i, 0, en);
6051                 if (ret)
6052                         return ret;
6053         }
6054
6055         return 0;
6056 }
6057
6058 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6059 {
6060         struct hclge_vport *vport = hclge_get_vport(handle);
6061         struct hnae3_knic_private_info *kinfo;
6062         struct hnae3_queue *queue;
6063         struct hclge_tqp *tqp;
6064         int i;
6065
6066         kinfo = &vport->nic.kinfo;
6067         for (i = 0; i < kinfo->num_tqps; i++) {
6068                 queue = handle->kinfo.tqp[i];
6069                 tqp = container_of(queue, struct hclge_tqp, q);
6070                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6071         }
6072 }
6073
6074 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6075 {
6076         struct hclge_vport *vport = hclge_get_vport(handle);
6077         struct hclge_dev *hdev = vport->back;
6078
6079         if (enable) {
6080                 mod_timer(&hdev->service_timer, jiffies + HZ);
6081         } else {
6082                 del_timer_sync(&hdev->service_timer);
6083                 cancel_work_sync(&hdev->service_task);
6084                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6085         }
6086 }
6087
6088 static int hclge_ae_start(struct hnae3_handle *handle)
6089 {
6090         struct hclge_vport *vport = hclge_get_vport(handle);
6091         struct hclge_dev *hdev = vport->back;
6092
6093         /* mac enable */
6094         hclge_cfg_mac_mode(hdev, true);
6095         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6096         hdev->hw.mac.link = 0;
6097
6098         /* reset tqp stats */
6099         hclge_reset_tqp_stats(handle);
6100
6101         hclge_mac_start_phy(hdev);
6102
6103         return 0;
6104 }
6105
6106 static void hclge_ae_stop(struct hnae3_handle *handle)
6107 {
6108         struct hclge_vport *vport = hclge_get_vport(handle);
6109         struct hclge_dev *hdev = vport->back;
6110         int i;
6111
6112         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6113
6114         hclge_clear_arfs_rules(handle);
6115
6116         /* If it is not a PF reset, the firmware will disable the MAC,
6117          * so it only needs to stop the phy here.
6118          */
6119         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6120             hdev->reset_type != HNAE3_FUNC_RESET) {
6121                 hclge_mac_stop_phy(hdev);
6122                 return;
6123         }
6124
6125         for (i = 0; i < handle->kinfo.num_tqps; i++)
6126                 hclge_reset_tqp(handle, i);
6127
6128         /* Mac disable */
6129         hclge_cfg_mac_mode(hdev, false);
6130
6131         hclge_mac_stop_phy(hdev);
6132
6133         /* reset tqp stats */
6134         hclge_reset_tqp_stats(handle);
6135         hclge_update_link_status(hdev);
6136 }
6137
6138 int hclge_vport_start(struct hclge_vport *vport)
6139 {
6140         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6141         vport->last_active_jiffies = jiffies;
6142         return 0;
6143 }
6144
6145 void hclge_vport_stop(struct hclge_vport *vport)
6146 {
6147         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6148 }
6149
6150 static int hclge_client_start(struct hnae3_handle *handle)
6151 {
6152         struct hclge_vport *vport = hclge_get_vport(handle);
6153
6154         return hclge_vport_start(vport);
6155 }
6156
6157 static void hclge_client_stop(struct hnae3_handle *handle)
6158 {
6159         struct hclge_vport *vport = hclge_get_vport(handle);
6160
6161         hclge_vport_stop(vport);
6162 }
6163
6164 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6165                                          u16 cmdq_resp, u8  resp_code,
6166                                          enum hclge_mac_vlan_tbl_opcode op)
6167 {
6168         struct hclge_dev *hdev = vport->back;
6169         int return_status = -EIO;
6170
6171         if (cmdq_resp) {
6172                 dev_err(&hdev->pdev->dev,
6173                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6174                         cmdq_resp);
6175                 return -EIO;
6176         }
6177
6178         if (op == HCLGE_MAC_VLAN_ADD) {
6179                 if ((!resp_code) || (resp_code == 1)) {
6180                         return_status = 0;
6181                 } else if (resp_code == 2) {
6182                         return_status = -ENOSPC;
6183                         dev_err(&hdev->pdev->dev,
6184                                 "add mac addr failed for uc_overflow.\n");
6185                 } else if (resp_code == 3) {
6186                         return_status = -ENOSPC;
6187                         dev_err(&hdev->pdev->dev,
6188                                 "add mac addr failed for mc_overflow.\n");
6189                 } else {
6190                         dev_err(&hdev->pdev->dev,
6191                                 "add mac addr failed for undefined, code=%d.\n",
6192                                 resp_code);
6193                 }
6194         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6195                 if (!resp_code) {
6196                         return_status = 0;
6197                 } else if (resp_code == 1) {
6198                         return_status = -ENOENT;
6199                         dev_dbg(&hdev->pdev->dev,
6200                                 "remove mac addr failed for miss.\n");
6201                 } else {
6202                         dev_err(&hdev->pdev->dev,
6203                                 "remove mac addr failed for undefined, code=%d.\n",
6204                                 resp_code);
6205                 }
6206         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6207                 if (!resp_code) {
6208                         return_status = 0;
6209                 } else if (resp_code == 1) {
6210                         return_status = -ENOENT;
6211                         dev_dbg(&hdev->pdev->dev,
6212                                 "lookup mac addr failed for miss.\n");
6213                 } else {
6214                         dev_err(&hdev->pdev->dev,
6215                                 "lookup mac addr failed for undefined, code=%d.\n",
6216                                 resp_code);
6217                 }
6218         } else {
6219                 return_status = -EINVAL;
6220                 dev_err(&hdev->pdev->dev,
6221                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6222                         op);
6223         }
6224
6225         return return_status;
6226 }
6227
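     /* The multicast mac_vlan entry carries a 256-bit function bitmap split
      * across desc[1] (vfids 0-191) and desc[2] (vfids 192-255); set or clear
      * the bit for vfid in the corresponding word.
      */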
6228 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6229 {
6230         int word_num;
6231         int bit_num;
6232
6233         if (vfid > 255 || vfid < 0)
6234                 return -EIO;
6235
6236         if (vfid >= 0 && vfid <= 191) {
6237                 word_num = vfid / 32;
6238                 bit_num  = vfid % 32;
6239                 if (clr)
6240                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6241                 else
6242                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6243         } else {
6244                 word_num = (vfid - 192) / 32;
6245                 bit_num  = vfid % 32;
6246                 if (clr)
6247                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6248                 else
6249                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6250         }
6251
6252         return 0;
6253 }
6254
6255 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6256 {
6257 #define HCLGE_DESC_NUMBER 3
6258 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6259         int i, j;
6260
6261         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6262                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6263                         if (desc[i].data[j])
6264                                 return false;
6265
6266         return true;
6267 }
6268
6269 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6270                                    const u8 *addr, bool is_mc)
6271 {
6272         const unsigned char *mac_addr = addr;
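             /* bytes 0-3 of the MAC form the 32-bit high word of the table
              * entry, bytes 4-5 the 16-bit low word
              */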
6273         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
6274                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
6275         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6276
6277         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6278         if (is_mc) {
6279                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6280                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6281         }
6282
6283         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6284         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6285 }
6286
6287 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6288                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6289 {
6290         struct hclge_dev *hdev = vport->back;
6291         struct hclge_desc desc;
6292         u8 resp_code;
6293         u16 retval;
6294         int ret;
6295
6296         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6297
6298         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6299
6300         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6301         if (ret) {
6302                 dev_err(&hdev->pdev->dev,
6303                         "del mac addr failed for cmd_send, ret =%d.\n",
6304                         ret);
6305                 return ret;
6306         }
6307         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6308         retval = le16_to_cpu(desc.retval);
6309
6310         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6311                                              HCLGE_MAC_VLAN_REMOVE);
6312 }
6313
6314 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6315                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6316                                      struct hclge_desc *desc,
6317                                      bool is_mc)
6318 {
6319         struct hclge_dev *hdev = vport->back;
6320         u8 resp_code;
6321         u16 retval;
6322         int ret;
6323
6324         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
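             /* a multicast lookup spans three descriptors (the entry plus the
              * 256-bit vfid bitmap), chained with HCLGE_CMD_FLAG_NEXT; a
              * unicast lookup needs only one
              */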
6325         if (is_mc) {
6326                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6327                 memcpy(desc[0].data,
6328                        req,
6329                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6330                 hclge_cmd_setup_basic_desc(&desc[1],
6331                                            HCLGE_OPC_MAC_VLAN_ADD,
6332                                            true);
6333                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6334                 hclge_cmd_setup_basic_desc(&desc[2],
6335                                            HCLGE_OPC_MAC_VLAN_ADD,
6336                                            true);
6337                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6338         } else {
6339                 memcpy(desc[0].data,
6340                        req,
6341                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6342                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6343         }
6344         if (ret) {
6345                 dev_err(&hdev->pdev->dev,
6346                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6347                         ret);
6348                 return ret;
6349         }
6350         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6351         retval = le16_to_cpu(desc[0].retval);
6352
6353         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6354                                              HCLGE_MAC_VLAN_LKUP);
6355 }
6356
6357 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6358                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6359                                   struct hclge_desc *mc_desc)
6360 {
6361         struct hclge_dev *hdev = vport->back;
6362         int cfg_status;
6363         u8 resp_code;
6364         u16 retval;
6365         int ret;
6366
6367         if (!mc_desc) {
6368                 struct hclge_desc desc;
6369
6370                 hclge_cmd_setup_basic_desc(&desc,
6371                                            HCLGE_OPC_MAC_VLAN_ADD,
6372                                            false);
6373                 memcpy(desc.data, req,
6374                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6375                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6376                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6377                 retval = le16_to_cpu(desc.retval);
6378
6379                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6380                                                            resp_code,
6381                                                            HCLGE_MAC_VLAN_ADD);
6382         } else {
6383                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6384                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6385                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6386                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6387                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6388                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6389                 memcpy(mc_desc[0].data, req,
6390                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6391                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6392                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6393                 retval = le16_to_cpu(mc_desc[0].retval);
6394
6395                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6396                                                            resp_code,
6397                                                            HCLGE_MAC_VLAN_ADD);
6398         }
6399
6400         if (ret) {
6401                 dev_err(&hdev->pdev->dev,
6402                         "add mac addr failed for cmd_send, ret =%d.\n",
6403                         ret);
6404                 return ret;
6405         }
6406
6407         return cfg_status;
6408 }
6409
6410 static int hclge_init_umv_space(struct hclge_dev *hdev)
6411 {
6412         u16 allocated_size = 0;
6413         int ret;
6414
6415         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6416                                   true);
6417         if (ret)
6418                 return ret;
6419
6420         if (allocated_size < hdev->wanted_umv_size)
6421                 dev_warn(&hdev->pdev->dev,
6422                          "Alloc umv space failed, want %d, get %d\n",
6423                          hdev->wanted_umv_size, allocated_size);
6424
6425         mutex_init(&hdev->umv_mutex);
6426         hdev->max_umv_size = allocated_size;
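             /* the UMV space is split into (num_req_vfs + 2) equal slices: the
              * PF and each VF get one private slice, and the remaining slice
              * plus the division remainder form the shared pool.
              * e.g. with 3072 entries and 8 VFs: priv = 3072 / 10 = 307,
              * share = 307 + 2 = 309.
              */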
6427         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6428         hdev->share_umv_size = hdev->priv_umv_size +
6429                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6430
6431         return 0;
6432 }
6433
6434 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6435 {
6436         int ret;
6437
6438         if (hdev->max_umv_size > 0) {
6439                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6440                                           false);
6441                 if (ret)
6442                         return ret;
6443                 hdev->max_umv_size = 0;
6444         }
6445         mutex_destroy(&hdev->umv_mutex);
6446
6447         return 0;
6448 }
6449
6450 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6451                                u16 *allocated_size, bool is_alloc)
6452 {
6453         struct hclge_umv_spc_alc_cmd *req;
6454         struct hclge_desc desc;
6455         int ret;
6456
6457         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6458         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6459         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6460         req->space_size = cpu_to_le32(space_size);
6461
6462         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6463         if (ret) {
6464                 dev_err(&hdev->pdev->dev,
6465                         "%s umv space failed for cmd_send, ret =%d\n",
6466                         is_alloc ? "allocate" : "free", ret);
6467                 return ret;
6468         }
6469
6470         if (is_alloc && allocated_size)
6471                 *allocated_size = le32_to_cpu(desc.data[1]);
6472
6473         return 0;
6474 }
6475
6476 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6477 {
6478         struct hclge_vport *vport;
6479         int i;
6480
6481         for (i = 0; i < hdev->num_alloc_vport; i++) {
6482                 vport = &hdev->vport[i];
6483                 vport->used_umv_num = 0;
6484         }
6485
6486         mutex_lock(&hdev->umv_mutex);
6487         hdev->share_umv_size = hdev->priv_umv_size +
6488                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6489         mutex_unlock(&hdev->umv_mutex);
6490 }
6491
6492 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6493 {
6494         struct hclge_dev *hdev = vport->back;
6495         bool is_full;
6496
6497         mutex_lock(&hdev->umv_mutex);
6498         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6499                    hdev->share_umv_size == 0);
6500         mutex_unlock(&hdev->umv_mutex);
6501
6502         return is_full;
6503 }
6504
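     /* Account a unicast MAC entry against the vport's private quota first and
      * then against the shared pool; freeing reverses the accounting.
      */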
6505 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6506 {
6507         struct hclge_dev *hdev = vport->back;
6508
6509         mutex_lock(&hdev->umv_mutex);
6510         if (is_free) {
6511                 if (vport->used_umv_num > hdev->priv_umv_size)
6512                         hdev->share_umv_size++;
6513
6514                 if (vport->used_umv_num > 0)
6515                         vport->used_umv_num--;
6516         } else {
6517                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6518                     hdev->share_umv_size > 0)
6519                         hdev->share_umv_size--;
6520                 vport->used_umv_num++;
6521         }
6522         mutex_unlock(&hdev->umv_mutex);
6523 }
6524
6525 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6526                              const unsigned char *addr)
6527 {
6528         struct hclge_vport *vport = hclge_get_vport(handle);
6529
6530         return hclge_add_uc_addr_common(vport, addr);
6531 }
6532
6533 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6534                              const unsigned char *addr)
6535 {
6536         struct hclge_dev *hdev = vport->back;
6537         struct hclge_mac_vlan_tbl_entry_cmd req;
6538         struct hclge_desc desc;
6539         u16 egress_port = 0;
6540         int ret;
6541
6542         /* mac addr check */
6543         if (is_zero_ether_addr(addr) ||
6544             is_broadcast_ether_addr(addr) ||
6545             is_multicast_ether_addr(addr)) {
6546                 dev_err(&hdev->pdev->dev,
6547                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6548                          addr,
6549                          is_zero_ether_addr(addr),
6550                          is_broadcast_ether_addr(addr),
6551                          is_multicast_ether_addr(addr));
6552                 return -EINVAL;
6553         }
6554
6555         memset(&req, 0, sizeof(req));
6556
6557         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6558                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6559
6560         req.egress_port = cpu_to_le16(egress_port);
6561
6562         hclge_prepare_mac_addr(&req, addr, false);
6563
6564         /* Look up the mac address in the mac_vlan table, and add
6565          * it if the entry does not exist. Duplicate unicast entries
6566          * are not allowed in the mac_vlan table.
6567          */
6568         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6569         if (ret == -ENOENT) {
6570                 if (!hclge_is_umv_space_full(vport)) {
6571                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6572                         if (!ret)
6573                                 hclge_update_umv_space(vport, false);
6574                         return ret;
6575                 }
6576
6577                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6578                         hdev->priv_umv_size);
6579
6580                 return -ENOSPC;
6581         }
6582
6583         /* check if we just hit the duplicate */
6584         if (!ret) {
6585                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6586                          vport->vport_id, addr);
6587                 return 0;
6588         }
6589
6590         dev_err(&hdev->pdev->dev,
6591                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6592                 addr);
6593
6594         return ret;
6595 }
6596
6597 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6598                             const unsigned char *addr)
6599 {
6600         struct hclge_vport *vport = hclge_get_vport(handle);
6601
6602         return hclge_rm_uc_addr_common(vport, addr);
6603 }
6604
6605 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6606                             const unsigned char *addr)
6607 {
6608         struct hclge_dev *hdev = vport->back;
6609         struct hclge_mac_vlan_tbl_entry_cmd req;
6610         int ret;
6611
6612         /* mac addr check */
6613         if (is_zero_ether_addr(addr) ||
6614             is_broadcast_ether_addr(addr) ||
6615             is_multicast_ether_addr(addr)) {
6616                 dev_dbg(&hdev->pdev->dev,
6617                         "Remove mac err! invalid mac:%pM.\n",
6618                          addr);
6619                 return -EINVAL;
6620         }
6621
6622         memset(&req, 0, sizeof(req));
6623         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6624         hclge_prepare_mac_addr(&req, addr, false);
6625         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6626         if (!ret)
6627                 hclge_update_umv_space(vport, true);
6628
6629         return ret;
6630 }
6631
6632 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6633                              const unsigned char *addr)
6634 {
6635         struct hclge_vport *vport = hclge_get_vport(handle);
6636
6637         return hclge_add_mc_addr_common(vport, addr);
6638 }
6639
6640 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6641                              const unsigned char *addr)
6642 {
6643         struct hclge_dev *hdev = vport->back;
6644         struct hclge_mac_vlan_tbl_entry_cmd req;
6645         struct hclge_desc desc[3];
6646         int status;
6647
6648         /* mac addr check */
6649         if (!is_multicast_ether_addr(addr)) {
6650                 dev_err(&hdev->pdev->dev,
6651                         "Add mc mac err! invalid mac:%pM.\n",
6652                          addr);
6653                 return -EINVAL;
6654         }
6655         memset(&req, 0, sizeof(req));
6656         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6657         hclge_prepare_mac_addr(&req, addr, true);
6658         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6659         if (!status) {
6660                 /* This mac addr exists, update the VFID for it */
6661                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6662                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6663         } else {
6664                 /* This mac addr does not exist, add a new entry for it */
6665                 memset(desc[0].data, 0, sizeof(desc[0].data));
6666                 memset(desc[1].data, 0, sizeof(desc[1].data));
6667                 memset(desc[2].data, 0, sizeof(desc[2].data));
6668                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6669                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6670         }
6671
6672         if (status == -ENOSPC)
6673                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6674
6675         return status;
6676 }
6677
6678 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6679                             const unsigned char *addr)
6680 {
6681         struct hclge_vport *vport = hclge_get_vport(handle);
6682
6683         return hclge_rm_mc_addr_common(vport, addr);
6684 }
6685
6686 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6687                             const unsigned char *addr)
6688 {
6689         struct hclge_dev *hdev = vport->back;
6690         struct hclge_mac_vlan_tbl_entry_cmd req;
6691         enum hclge_cmd_status status;
6692         struct hclge_desc desc[3];
6693
6694         /* mac addr check */
6695         if (!is_multicast_ether_addr(addr)) {
6696                 dev_dbg(&hdev->pdev->dev,
6697                         "Remove mc mac err! invalid mac:%pM.\n",
6698                          addr);
6699                 return -EINVAL;
6700         }
6701
6702         memset(&req, 0, sizeof(req));
6703         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6704         hclge_prepare_mac_addr(&req, addr, true);
6705         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6706         if (!status) {
6707                 /* This mac addr exists, remove this handle's VFID for it */
6708                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6709
6710                 if (hclge_is_all_function_id_zero(desc))
6711                         /* All the vfids are zero, so delete this entry */
6712                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6713                 else
6714                         /* Not all the vfids are zero, so update the entry's vfids */
6715                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6716
6717         } else {
6718                 /* Maybe this mac address is in the mta table, but it cannot be
6719                  * deleted here because an mta entry represents an address
6720                  * range rather than a specific address. The delete action for
6721                  * all entries will take effect in update_mta_status, called by
6722                  * hns3_nic_set_rx_mode.
6723                  */
6724                 status = 0;
6725         }
6726
6727         return status;
6728 }
6729
6730 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6731                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6732 {
6733         struct hclge_vport_mac_addr_cfg *mac_cfg;
6734         struct list_head *list;
6735
6736         if (!vport->vport_id)
6737                 return;
6738
6739         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6740         if (!mac_cfg)
6741                 return;
6742
6743         mac_cfg->hd_tbl_status = true;
6744         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6745
6746         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6747                &vport->uc_mac_list : &vport->mc_mac_list;
6748
6749         list_add_tail(&mac_cfg->node, list);
6750 }
6751
6752 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6753                               bool is_write_tbl,
6754                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6755 {
6756         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6757         struct list_head *list;
6758         bool uc_flag, mc_flag;
6759
6760         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6761                &vport->uc_mac_list : &vport->mc_mac_list;
6762
6763         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6764         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6765
6766         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6767                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6768                         if (uc_flag && mac_cfg->hd_tbl_status)
6769                                 hclge_rm_uc_addr_common(vport, mac_addr);
6770
6771                         if (mc_flag && mac_cfg->hd_tbl_status)
6772                                 hclge_rm_mc_addr_common(vport, mac_addr);
6773
6774                         list_del(&mac_cfg->node);
6775                         kfree(mac_cfg);
6776                         break;
6777                 }
6778         }
6779 }
6780
6781 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6782                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6783 {
6784         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6785         struct list_head *list;
6786
6787         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6788                &vport->uc_mac_list : &vport->mc_mac_list;
6789
6790         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6791                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6792                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6793
6794                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6795                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6796
6797                 mac_cfg->hd_tbl_status = false;
6798                 if (is_del_list) {
6799                         list_del(&mac_cfg->node);
6800                         kfree(mac_cfg);
6801                 }
6802         }
6803 }
6804
6805 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6806 {
6807         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6808         struct hclge_vport *vport;
6809         int i;
6810
6811         mutex_lock(&hdev->vport_cfg_mutex);
6812         for (i = 0; i < hdev->num_alloc_vport; i++) {
6813                 vport = &hdev->vport[i];
6814                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6815                         list_del(&mac->node);
6816                         kfree(mac);
6817                 }
6818
6819                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6820                         list_del(&mac->node);
6821                         kfree(mac);
6822                 }
6823         }
6824         mutex_unlock(&hdev->vport_cfg_mutex);
6825 }
6826
6827 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6828                                               u16 cmdq_resp, u8 resp_code)
6829 {
6830 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6831 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6832 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6833 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6834
6835         int return_status;
6836
6837         if (cmdq_resp) {
6838                 dev_err(&hdev->pdev->dev,
6839                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6840                         cmdq_resp);
6841                 return -EIO;
6842         }
6843
6844         switch (resp_code) {
6845         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6846         case HCLGE_ETHERTYPE_ALREADY_ADD:
6847                 return_status = 0;
6848                 break;
6849         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6850                 dev_err(&hdev->pdev->dev,
6851                         "add mac ethertype failed for manager table overflow.\n");
6852                 return_status = -EIO;
6853                 break;
6854         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6855                 dev_err(&hdev->pdev->dev,
6856                         "add mac ethertype failed for key conflict.\n");
6857                 return_status = -EIO;
6858                 break;
6859         default:
6860                 dev_err(&hdev->pdev->dev,
6861                         "add mac ethertype failed for undefined, code=%d.\n",
6862                         resp_code);
6863                 return_status = -EIO;
6864         }
6865
6866         return return_status;
6867 }
6868
6869 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6870                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6871 {
6872         struct hclge_desc desc;
6873         u8 resp_code;
6874         u16 retval;
6875         int ret;
6876
6877         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6878         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6879
6880         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6881         if (ret) {
6882                 dev_err(&hdev->pdev->dev,
6883                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6884                         ret);
6885                 return ret;
6886         }
6887
6888         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6889         retval = le16_to_cpu(desc.retval);
6890
6891         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6892 }
6893
6894 static int init_mgr_tbl(struct hclge_dev *hdev)
6895 {
6896         int ret;
6897         int i;
6898
6899         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6900                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6901                 if (ret) {
6902                         dev_err(&hdev->pdev->dev,
6903                                 "add mac ethertype failed, ret =%d.\n",
6904                                 ret);
6905                         return ret;
6906                 }
6907         }
6908
6909         return 0;
6910 }
6911
6912 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6913 {
6914         struct hclge_vport *vport = hclge_get_vport(handle);
6915         struct hclge_dev *hdev = vport->back;
6916
6917         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6918 }
6919
6920 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6921                               bool is_first)
6922 {
6923         const unsigned char *new_addr = (const unsigned char *)p;
6924         struct hclge_vport *vport = hclge_get_vport(handle);
6925         struct hclge_dev *hdev = vport->back;
6926         int ret;
6927
6928         /* mac addr check */
6929         if (is_zero_ether_addr(new_addr) ||
6930             is_broadcast_ether_addr(new_addr) ||
6931             is_multicast_ether_addr(new_addr)) {
6932                 dev_err(&hdev->pdev->dev,
6933                         "Change uc mac err! invalid mac:%pM.\n",
6934                          new_addr);
6935                 return -EINVAL;
6936         }
6937
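             /* in a kdump kernel, the MAC left behind by the crashed kernel is
              * presumably still in the table, so remove it even on the first set
              */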
6938         if ((!is_first || is_kdump_kernel()) &&
6939             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6940                 dev_warn(&hdev->pdev->dev,
6941                          "remove old uc mac address fail.\n");
6942
6943         ret = hclge_add_uc_addr(handle, new_addr);
6944         if (ret) {
6945                 dev_err(&hdev->pdev->dev,
6946                         "add uc mac address fail, ret =%d.\n",
6947                         ret);
6948
6949                 if (!is_first &&
6950                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6951                         dev_err(&hdev->pdev->dev,
6952                                 "restore uc mac address fail.\n");
6953
6954                 return -EIO;
6955         }
6956
6957         ret = hclge_pause_addr_cfg(hdev, new_addr);
6958         if (ret) {
6959                 dev_err(&hdev->pdev->dev,
6960                         "configure mac pause address fail, ret =%d.\n",
6961                         ret);
6962                 return -EIO;
6963         }
6964
6965         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6966
6967         return 0;
6968 }
6969
6970 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6971                           int cmd)
6972 {
6973         struct hclge_vport *vport = hclge_get_vport(handle);
6974         struct hclge_dev *hdev = vport->back;
6975
6976         if (!hdev->hw.mac.phydev)
6977                 return -EOPNOTSUPP;
6978
6979         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6980 }
6981
6982 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6983                                       u8 fe_type, bool filter_en, u8 vf_id)
6984 {
6985         struct hclge_vlan_filter_ctrl_cmd *req;
6986         struct hclge_desc desc;
6987         int ret;
6988
6989         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6990
6991         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6992         req->vlan_type = vlan_type;
6993         req->vlan_fe = filter_en ? fe_type : 0;
6994         req->vf_id = vf_id;
6995
6996         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6997         if (ret)
6998                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6999                         ret);
7000
7001         return ret;
7002 }
7003
7004 #define HCLGE_FILTER_TYPE_VF            0
7005 #define HCLGE_FILTER_TYPE_PORT          1
7006 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7007 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7008 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7009 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7010 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7011 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7012                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7013 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7014                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7015
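/* On revision 0x21 and later the VF-type filter carries the NIC and RoCE
 * egress bits and the port-type filter carries the NIC and RoCE ingress
 * bits; older revisions only expose the single V1 egress bit on the
 * VF-type filter. The HNAE3_VLAN_FLTR flag mirrors the requested state.
 */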
7016 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7017 {
7018         struct hclge_vport *vport = hclge_get_vport(handle);
7019         struct hclge_dev *hdev = vport->back;
7020
7021         if (hdev->pdev->revision >= 0x21) {
7022                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7023                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7024                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7025                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7026         } else {
7027                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7028                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7029                                            0);
7030         }
7031         if (enable)
7032                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7033         else
7034                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7035 }
7036
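/* The VF VLAN filter command carries a 256-bit VF bitmap split across two
 * descriptors of HCLGE_MAX_VF_BYTES (16) bytes each: desc[0] covers
 * vfid 0..127 and desc[1] covers vfid 128..255. The vfid selects the byte
 * and bit to set, e.g. vfid 200 -> byte 25, i.e. bit 0 of byte 9 in
 * desc[1].
 */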
7037 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7038                                     bool is_kill, u16 vlan, u8 qos,
7039                                     __be16 proto)
7040 {
7041 #define HCLGE_MAX_VF_BYTES  16
7042         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7043         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7044         struct hclge_desc desc[2];
7045         u8 vf_byte_val;
7046         u8 vf_byte_off;
7047         int ret;
7048
7049         hclge_cmd_setup_basic_desc(&desc[0],
7050                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7051         hclge_cmd_setup_basic_desc(&desc[1],
7052                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7053
7054         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7055
7056         vf_byte_off = vfid / 8;
7057         vf_byte_val = 1 << (vfid % 8);
7058
7059         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7060         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7061
7062         req0->vlan_id  = cpu_to_le16(vlan);
7063         req0->vlan_cfg = is_kill;
7064
7065         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7066                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7067         else
7068                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7069
7070         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7071         if (ret) {
7072                 dev_err(&hdev->pdev->dev,
7073                         "Send vf vlan command fail, ret =%d.\n",
7074                         ret);
7075                 return ret;
7076         }
7077
7078         if (!is_kill) {
7079 #define HCLGE_VF_VLAN_NO_ENTRY  2
7080                 if (!req0->resp_code || req0->resp_code == 1)
7081                         return 0;
7082
7083                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7084                         dev_warn(&hdev->pdev->dev,
7085                                  "vf vlan table is full, vf vlan filter is disabled\n");
7086                         return 0;
7087                 }
7088
7089                 dev_err(&hdev->pdev->dev,
7090                         "Add vf vlan filter fail, ret =%d.\n",
7091                         req0->resp_code);
7092         } else {
7093 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7094                 if (!req0->resp_code)
7095                         return 0;
7096
7097                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7098                         dev_warn(&hdev->pdev->dev,
7099                                  "vlan %d filter is not in vf vlan table\n",
7100                                  vlan);
7101                         return 0;
7102                 }
7103
7104                 dev_err(&hdev->pdev->dev,
7105                         "Kill vf vlan filter fail, ret =%d.\n",
7106                         req0->resp_code);
7107         }
7108
7109         return -EIO;
7110 }
7111
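/* The PF (port) VLAN filter is programmed in blocks of 160 VLAN ids per
 * command: vlan_offset selects the block and the VLAN's bit is set in the
 * offset bitmap, e.g. vlan 300 -> block 1, byte 17, bit 4. vlan_cfg set
 * to 1 removes the entry (is_kill).
 */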
7112 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7113                                       u16 vlan_id, bool is_kill)
7114 {
7115         struct hclge_vlan_filter_pf_cfg_cmd *req;
7116         struct hclge_desc desc;
7117         u8 vlan_offset_byte_val;
7118         u8 vlan_offset_byte;
7119         u8 vlan_offset_160;
7120         int ret;
7121
7122         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7123
7124         vlan_offset_160 = vlan_id / 160;
7125         vlan_offset_byte = (vlan_id % 160) / 8;
7126         vlan_offset_byte_val = 1 << (vlan_id % 8);
7127
7128         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7129         req->vlan_offset = vlan_offset_160;
7130         req->vlan_cfg = is_kill;
7131         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7132
7133         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7134         if (ret)
7135                 dev_err(&hdev->pdev->dev,
7136                         "port vlan command, send fail, ret =%d.\n", ret);
7137         return ret;
7138 }
7139
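/* Program a VLAN filter entry for one vport. The per-VF filter is updated
 * first; membership is then tracked in the hdev->vlan_table bitmap so the
 * shared port filter is only written when the first vport adds the VLAN
 * or the last vport removes it. Killing vlan 0 is a no-op.
 */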
7140 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7141                                     u16 vport_id, u16 vlan_id, u8 qos,
7142                                     bool is_kill)
7143 {
7144         u16 vport_idx, vport_num = 0;
7145         int ret;
7146
7147         if (is_kill && !vlan_id)
7148                 return 0;
7149
7150         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7151                                        0, proto);
7152         if (ret) {
7153                 dev_err(&hdev->pdev->dev,
7154                         "Set %d vport vlan filter config fail, ret =%d.\n",
7155                         vport_id, ret);
7156                 return ret;
7157         }
7158
7159         /* vlan 0 may be added twice when 8021q module is enabled */
7160         if (!is_kill && !vlan_id &&
7161             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7162                 return 0;
7163
7164         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7165                 dev_err(&hdev->pdev->dev,
7166                         "Add port vlan failed, vport %d is already in vlan %d\n",
7167                         vport_id, vlan_id);
7168                 return -EINVAL;
7169         }
7170
7171         if (is_kill &&
7172             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7173                 dev_err(&hdev->pdev->dev,
7174                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7175                         vport_id, vlan_id);
7176                 return -EINVAL;
7177         }
7178
7179         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7180                 vport_num++;
7181
7182         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7183                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7184                                                  is_kill);
7185
7186         return ret;
7187 }
7188
7189 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7190 {
7191         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7192         struct hclge_vport_vtag_tx_cfg_cmd *req;
7193         struct hclge_dev *hdev = vport->back;
7194         struct hclge_desc desc;
7195         int status;
7196
7197         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7198
7199         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7200         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7201         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7202         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7203                       vcfg->accept_tag1 ? 1 : 0);
7204         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7205                       vcfg->accept_untag1 ? 1 : 0);
7206         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7207                       vcfg->accept_tag2 ? 1 : 0);
7208         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7209                       vcfg->accept_untag2 ? 1 : 0);
7210         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7211                       vcfg->insert_tag1_en ? 1 : 0);
7212         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7213                       vcfg->insert_tag2_en ? 1 : 0);
7214         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7215
7216         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7217         req->vf_bitmap[req->vf_offset] =
7218                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7219
7220         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7221         if (status)
7222                 dev_err(&hdev->pdev->dev,
7223                         "Send port txvlan cfg command fail, ret =%d\n",
7224                         status);
7225
7226         return status;
7227 }
7228
7229 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7230 {
7231         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7232         struct hclge_vport_vtag_rx_cfg_cmd *req;
7233         struct hclge_dev *hdev = vport->back;
7234         struct hclge_desc desc;
7235         int status;
7236
7237         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7238
7239         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7240         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7241                       vcfg->strip_tag1_en ? 1 : 0);
7242         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7243                       vcfg->strip_tag2_en ? 1 : 0);
7244         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7245                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7246         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7247                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7248
7249         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7250         req->vf_bitmap[req->vf_offset] =
7251                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7252
7253         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7254         if (status)
7255                 dev_err(&hdev->pdev->dev,
7256                         "Send port rxvlan cfg command fail, ret =%d\n",
7257                         status);
7258
7259         return status;
7260 }
7261
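/* Configure TX/RX VLAN offload for a vport according to the port based
 * VLAN state: when it is disabled, tag1 from the stack is accepted as-is
 * and RX stripping of tag2 follows rx_vlan_offload_en; when it is
 * enabled, the hardware inserts vlan_tag as tag1, RX stripping of tag1
 * follows rx_vlan_offload_en and tag2 is always stripped.
 */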
7262 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7263                                   u16 port_base_vlan_state,
7264                                   u16 vlan_tag)
7265 {
7266         int ret;
7267
7268         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7269                 vport->txvlan_cfg.accept_tag1 = true;
7270                 vport->txvlan_cfg.insert_tag1_en = false;
7271                 vport->txvlan_cfg.default_tag1 = 0;
7272         } else {
7273                 vport->txvlan_cfg.accept_tag1 = false;
7274                 vport->txvlan_cfg.insert_tag1_en = true;
7275                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7276         }
7277
7278         vport->txvlan_cfg.accept_untag1 = true;
7279
7280         /* accept_tag2 and accept_untag2 are not supported on
7281          * pdev revision 0x20; newer revisions support them.
7282          * These two fields cannot be configured by the user.
7283          */
7284         vport->txvlan_cfg.accept_tag2 = true;
7285         vport->txvlan_cfg.accept_untag2 = true;
7286         vport->txvlan_cfg.insert_tag2_en = false;
7287         vport->txvlan_cfg.default_tag2 = 0;
7288
7289         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7290                 vport->rxvlan_cfg.strip_tag1_en = false;
7291                 vport->rxvlan_cfg.strip_tag2_en =
7292                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7293         } else {
7294                 vport->rxvlan_cfg.strip_tag1_en =
7295                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7296                 vport->rxvlan_cfg.strip_tag2_en = true;
7297         }
7298         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7299         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7300
7301         ret = hclge_set_vlan_tx_offload_cfg(vport);
7302         if (ret)
7303                 return ret;
7304
7305         return hclge_set_vlan_rx_offload_cfg(vport);
7306 }
7307
7308 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7309 {
7310         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7311         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7312         struct hclge_desc desc;
7313         int status;
7314
7315         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7316         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7317         rx_req->ot_fst_vlan_type =
7318                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7319         rx_req->ot_sec_vlan_type =
7320                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7321         rx_req->in_fst_vlan_type =
7322                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7323         rx_req->in_sec_vlan_type =
7324                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7325
7326         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7327         if (status) {
7328                 dev_err(&hdev->pdev->dev,
7329                         "Send rxvlan protocol type command fail, ret =%d\n",
7330                         status);
7331                 return status;
7332         }
7333
7334         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7335
7336         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7337         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7338         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7339
7340         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7341         if (status)
7342                 dev_err(&hdev->pdev->dev,
7343                         "Send txvlan protocol type command fail, ret =%d\n",
7344                         status);
7345
7346         return status;
7347 }
7348
7349 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7350 {
7351 #define HCLGE_DEF_VLAN_TYPE             0x8100
7352
7353         struct hnae3_handle *handle = &hdev->vport[0].nic;
7354         struct hclge_vport *vport;
7355         int ret;
7356         int i;
7357
7358         if (hdev->pdev->revision >= 0x21) {
7359                 /* for revision 0x21, vf vlan filter is per function */
7360                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7361                         vport = &hdev->vport[i];
7362                         ret = hclge_set_vlan_filter_ctrl(hdev,
7363                                                          HCLGE_FILTER_TYPE_VF,
7364                                                          HCLGE_FILTER_FE_EGRESS,
7365                                                          true,
7366                                                          vport->vport_id);
7367                         if (ret)
7368                                 return ret;
7369                 }
7370
7371                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7372                                                  HCLGE_FILTER_FE_INGRESS, true,
7373                                                  0);
7374                 if (ret)
7375                         return ret;
7376         } else {
7377                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7378                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7379                                                  true, 0);
7380                 if (ret)
7381                         return ret;
7382         }
7383
7384         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7385
7386         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7387         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7388         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7389         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7390         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7391         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7392
7393         ret = hclge_set_vlan_protocol_type(hdev);
7394         if (ret)
7395                 return ret;
7396
7397         for (i = 0; i < hdev->num_alloc_vport; i++) {
7398                 u16 vlan_tag;
7399
7400                 vport = &hdev->vport[i];
7401                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7402
7403                 ret = hclge_vlan_offload_cfg(vport,
7404                                              vport->port_base_vlan_cfg.state,
7405                                              vlan_tag);
7406                 if (ret)
7407                         return ret;
7408         }
7409
7410         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7411 }
7412
7413 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7414                                        bool writen_to_tbl)
7415 {
7416         struct hclge_vport_vlan_cfg *vlan;
7417
7418         /* vlan 0 is reserved */
7419         if (!vlan_id)
7420                 return;
7421
7422         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7423         if (!vlan)
7424                 return;
7425
7426         vlan->hd_tbl_status = writen_to_tbl;
7427         vlan->vlan_id = vlan_id;
7428
7429         list_add_tail(&vlan->node, &vport->vlan_list);
7430 }
7431
7432 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7433 {
7434         struct hclge_vport_vlan_cfg *vlan, *tmp;
7435         struct hclge_dev *hdev = vport->back;
7436         int ret;
7437
7438         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7439                 if (!vlan->hd_tbl_status) {
7440                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7441                                                        vport->vport_id,
7442                                                        vlan->vlan_id, 0, false);
7443                         if (ret) {
7444                                 dev_err(&hdev->pdev->dev,
7445                                         "restore vport vlan list failed, ret=%d\n",
7446                                         ret);
7447                                 return ret;
7448                         }
7449                 }
7450                 vlan->hd_tbl_status = true;
7451         }
7452
7453         return 0;
7454 }
7455
7456 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7457                                       bool is_write_tbl)
7458 {
7459         struct hclge_vport_vlan_cfg *vlan, *tmp;
7460         struct hclge_dev *hdev = vport->back;
7461
7462         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7463                 if (vlan->vlan_id == vlan_id) {
7464                         if (is_write_tbl && vlan->hd_tbl_status)
7465                                 hclge_set_vlan_filter_hw(hdev,
7466                                                          htons(ETH_P_8021Q),
7467                                                          vport->vport_id,
7468                                                          vlan_id, 0,
7469                                                          true);
7470
7471                         list_del(&vlan->node);
7472                         kfree(vlan);
7473                         break;
7474                 }
7475         }
7476 }
7477
7478 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7479 {
7480         struct hclge_vport_vlan_cfg *vlan, *tmp;
7481         struct hclge_dev *hdev = vport->back;
7482
7483         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7484                 if (vlan->hd_tbl_status)
7485                         hclge_set_vlan_filter_hw(hdev,
7486                                                  htons(ETH_P_8021Q),
7487                                                  vport->vport_id,
7488                                                  vlan->vlan_id, 0,
7489                                                  true);
7490
7491                 vlan->hd_tbl_status = false;
7492                 if (is_del_list) {
7493                         list_del(&vlan->node);
7494                         kfree(vlan);
7495                 }
7496         }
7497 }
7498
7499 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7500 {
7501         struct hclge_vport_vlan_cfg *vlan, *tmp;
7502         struct hclge_vport *vport;
7503         int i;
7504
7505         mutex_lock(&hdev->vport_cfg_mutex);
7506         for (i = 0; i < hdev->num_alloc_vport; i++) {
7507                 vport = &hdev->vport[i];
7508                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7509                         list_del(&vlan->node);
7510                         kfree(vlan);
7511                 }
7512         }
7513         mutex_unlock(&hdev->vport_cfg_mutex);
7514 }
7515
7516 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7517 {
7518         struct hclge_vport *vport = hclge_get_vport(handle);
7519
7520         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7521                 vport->rxvlan_cfg.strip_tag1_en = false;
7522                 vport->rxvlan_cfg.strip_tag2_en = enable;
7523         } else {
7524                 vport->rxvlan_cfg.strip_tag1_en = enable;
7525                 vport->rxvlan_cfg.strip_tag2_en = true;
7526         }
7527         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7528         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7529         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7530
7531         return hclge_set_vlan_rx_offload_cfg(vport);
7532 }
7533
7534 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7535                                             u16 port_base_vlan_state,
7536                                             struct hclge_vlan_info *new_info,
7537                                             struct hclge_vlan_info *old_info)
7538 {
7539         struct hclge_dev *hdev = vport->back;
7540         int ret;
7541
7542         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7543                 hclge_rm_vport_all_vlan_table(vport, false);
7544                 return hclge_set_vlan_filter_hw(hdev,
7545                                                  htons(new_info->vlan_proto),
7546                                                  vport->vport_id,
7547                                                  new_info->vlan_tag,
7548                                                  new_info->qos, false);
7549         }
7550
7551         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7552                                        vport->vport_id, old_info->vlan_tag,
7553                                        old_info->qos, true);
7554         if (ret)
7555                 return ret;
7556
7557         return hclge_add_vport_all_vlan_table(vport);
7558 }
7559
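/* Apply a port based VLAN change for a vport. The offload configuration
 * is updated first. HNAE3_PORT_BASE_VLAN_MODIFY just replaces the
 * hardware filter entry (add the new tag, remove the old one) and keeps
 * the current state; enable/disable goes through
 * hclge_update_vlan_filter_entries() to swap between the port VLAN entry
 * and the vport VLAN list, and only then is port_base_vlan_cfg.state
 * updated. The cached vlan_info is refreshed in both cases.
 */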
7560 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7561                                     struct hclge_vlan_info *vlan_info)
7562 {
7563         struct hnae3_handle *nic = &vport->nic;
7564         struct hclge_vlan_info *old_vlan_info;
7565         struct hclge_dev *hdev = vport->back;
7566         int ret;
7567
7568         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7569
7570         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7571         if (ret)
7572                 return ret;
7573
7574         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7575                 /* add new VLAN tag */
7576                 ret = hclge_set_vlan_filter_hw(hdev,
7577                                                htons(vlan_info->vlan_proto),
7578                                                vport->vport_id,
7579                                                vlan_info->vlan_tag,
7580                                                vlan_info->qos, false);
7581                 if (ret)
7582                         return ret;
7583
7584                 /* remove old VLAN tag */
7585                 ret = hclge_set_vlan_filter_hw(hdev,
7586                                                htons(old_vlan_info->vlan_proto),
7587                                                vport->vport_id,
7588                                                old_vlan_info->vlan_tag,
7589                                                old_vlan_info->qos, true);
7590                 if (ret)
7591                         return ret;
7592
7593                 goto update;
7594         }
7595
7596         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7597                                                old_vlan_info);
7598         if (ret)
7599                 return ret;
7600
7601         /* update state only when disable/enable port based VLAN */
7602         vport->port_base_vlan_cfg.state = state;
7603         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7604                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7605         else
7606                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7607
7608 update:
7609         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7610         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7611         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7612
7613         return 0;
7614 }
7615
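/* Derive the port based VLAN transition from the current state and the
 * requested tag: while disabled, vlan 0 means no change and any other tag
 * enables it; while enabled, vlan 0 disables it, the same tag means no
 * change and a different tag means modify.
 */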
7616 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7617                                           enum hnae3_port_base_vlan_state state,
7618                                           u16 vlan)
7619 {
7620         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7621                 if (!vlan)
7622                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7623                 else
7624                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7625         } else {
7626                 if (!vlan)
7627                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7628                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7629                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7630                 else
7631                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7632         }
7633 }
7634
7635 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7636                                     u16 vlan, u8 qos, __be16 proto)
7637 {
7638         struct hclge_vport *vport = hclge_get_vport(handle);
7639         struct hclge_dev *hdev = vport->back;
7640         struct hclge_vlan_info vlan_info;
7641         u16 state;
7642         int ret;
7643
7644         if (hdev->pdev->revision == 0x20)
7645                 return -EOPNOTSUPP;
7646
7647         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7648         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7649                 return -EINVAL;
7650         if (proto != htons(ETH_P_8021Q))
7651                 return -EPROTONOSUPPORT;
7652
7653         vport = &hdev->vport[vfid];
7654         state = hclge_get_port_base_vlan_state(vport,
7655                                                vport->port_base_vlan_cfg.state,
7656                                                vlan);
7657         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7658                 return 0;
7659
7660         vlan_info.vlan_tag = vlan;
7661         vlan_info.qos = qos;
7662         vlan_info.vlan_proto = ntohs(proto);
7663
7664         /* update port based VLAN for PF */
7665         if (!vfid) {
7666                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7667                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7668                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7669
7670                 return ret;
7671         }
7672
7673         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7674                 return hclge_update_port_base_vlan_cfg(vport, state,
7675                                                        &vlan_info);
7676         } else {
7677                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7678                                                         (u8)vfid, state,
7679                                                         vlan, qos,
7680                                                         ntohs(proto));
7681                 return ret;
7682         }
7683 }
7684
7685 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7686                           u16 vlan_id, bool is_kill)
7687 {
7688         struct hclge_vport *vport = hclge_get_vport(handle);
7689         struct hclge_dev *hdev = vport->back;
7690         bool writen_to_tbl = false;
7691         int ret = 0;
7692
7693         /* When port based VLAN is enabled, we use it as the VLAN filter
7694          * entry and don't update the VLAN filter table when the user adds
7695          * or removes a VLAN; we just update the vport VLAN list. The VLAN
7696          * ids in that list won't be written to the VLAN filter table until
7697          * port based VLAN is disabled.
7698          */
7699         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7700                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7701                                                vlan_id, 0, is_kill);
7702                 writen_to_tbl = true;
7703         }
7704
7705         if (ret)
7706                 return ret;
7707
7708         if (is_kill)
7709                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7710         else
7711                 hclge_add_vport_vlan_table(vport, vlan_id,
7712                                            writen_to_tbl);
7713
7714         return 0;
7715 }
7716
7717 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7718 {
7719         struct hclge_config_max_frm_size_cmd *req;
7720         struct hclge_desc desc;
7721
7722         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7723
7724         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7725         req->max_frm_size = cpu_to_le16(new_mps);
7726         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7727
7728         return hclge_cmd_send(&hdev->hw, &desc, 1);
7729 }
7730
7731 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7732 {
7733         struct hclge_vport *vport = hclge_get_vport(handle);
7734
7735         return hclge_set_vport_mtu(vport, new_mtu);
7736 }
7737
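/* Set the MTU for a vport. The maximum frame size is the MTU plus the
 * Ethernet header (14), FCS (4) and two VLAN tags (8), e.g. MTU 1500 ->
 * 1526 bytes, and is raised to at least HCLGE_MAC_DEFAULT_FRAME. A VF's
 * frame size must fit within the PF's and only updates vport->mps; for
 * the PF, the MAC and buffer allocation are reprogrammed with the client
 * notified DOWN/UP around the change.
 */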
7738 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7739 {
7740         struct hclge_dev *hdev = vport->back;
7741         int i, max_frm_size, ret = 0;
7742
7743         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7744         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7745             max_frm_size > HCLGE_MAC_MAX_FRAME)
7746                 return -EINVAL;
7747
7748         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7749         mutex_lock(&hdev->vport_lock);
7750         /* VF's mps must fit within hdev->mps */
7751         if (vport->vport_id && max_frm_size > hdev->mps) {
7752                 mutex_unlock(&hdev->vport_lock);
7753                 return -EINVAL;
7754         } else if (vport->vport_id) {
7755                 vport->mps = max_frm_size;
7756                 mutex_unlock(&hdev->vport_lock);
7757                 return 0;
7758         }
7759
7760         /* PF's mps must be greater than VF's mps */
7761         for (i = 1; i < hdev->num_alloc_vport; i++)
7762                 if (max_frm_size < hdev->vport[i].mps) {
7763                         mutex_unlock(&hdev->vport_lock);
7764                         return -EINVAL;
7765                 }
7766
7767         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7768
7769         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7770         if (ret) {
7771                 dev_err(&hdev->pdev->dev,
7772                         "Change mtu fail, ret =%d\n", ret);
7773                 goto out;
7774         }
7775
7776         hdev->mps = max_frm_size;
7777         vport->mps = max_frm_size;
7778
7779         ret = hclge_buffer_alloc(hdev);
7780         if (ret)
7781                 dev_err(&hdev->pdev->dev,
7782                         "Allocate buffer fail, ret =%d\n", ret);
7783
7784 out:
7785         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7786         mutex_unlock(&hdev->vport_lock);
7787         return ret;
7788 }
7789
7790 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7791                                     bool enable)
7792 {
7793         struct hclge_reset_tqp_queue_cmd *req;
7794         struct hclge_desc desc;
7795         int ret;
7796
7797         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7798
7799         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7800         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7801         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7802
7803         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7804         if (ret) {
7805                 dev_err(&hdev->pdev->dev,
7806                         "Send tqp reset cmd error, status =%d\n", ret);
7807                 return ret;
7808         }
7809
7810         return 0;
7811 }
7812
7813 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7814 {
7815         struct hclge_reset_tqp_queue_cmd *req;
7816         struct hclge_desc desc;
7817         int ret;
7818
7819         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7820
7821         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7822         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7823
7824         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7825         if (ret) {
7826                 dev_err(&hdev->pdev->dev,
7827                         "Get reset status error, status =%d\n", ret);
7828                 return ret;
7829         }
7830
7831         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7832 }
7833
7834 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7835 {
7836         struct hnae3_queue *queue;
7837         struct hclge_tqp *tqp;
7838
7839         queue = handle->kinfo.tqp[queue_id];
7840         tqp = container_of(queue, struct hclge_tqp, q);
7841
7842         return tqp->index;
7843 }
7844
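/* Reset a single TQP: disable the queue, assert the per-queue reset via
 * HCLGE_OPC_RESET_TQP_QUEUE, poll the ready_to_reset bit every 20 ms up
 * to HCLGE_TQP_RESET_TRY_TIMES times, then deassert the reset.
 */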
7845 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7846 {
7847         struct hclge_vport *vport = hclge_get_vport(handle);
7848         struct hclge_dev *hdev = vport->back;
7849         int reset_try_times = 0;
7850         int reset_status;
7851         u16 queue_gid;
7852         int ret = 0;
7853
7854         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7855
7856         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7857         if (ret) {
7858                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7859                 return ret;
7860         }
7861
7862         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7863         if (ret) {
7864                 dev_err(&hdev->pdev->dev,
7865                         "Send reset tqp cmd fail, ret = %d\n", ret);
7866                 return ret;
7867         }
7868
7869         reset_try_times = 0;
7870         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7871                 /* Wait for tqp hw reset */
7872                 msleep(20);
7873                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7874                 if (reset_status)
7875                         break;
7876         }
7877
7878         if (!reset_status) {
7879                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7880                 return -ETIME;
7881         }
7882
7883         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7884         if (ret)
7885                 dev_err(&hdev->pdev->dev,
7886                         "Deassert the soft reset fail, ret = %d\n", ret);
7887
7888         return ret;
7889 }
7890
7891 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7892 {
7893         struct hclge_dev *hdev = vport->back;
7894         int reset_try_times = 0;
7895         int reset_status;
7896         u16 queue_gid;
7897         int ret;
7898
7899         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7900
7901         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7902         if (ret) {
7903                 dev_warn(&hdev->pdev->dev,
7904                          "Send reset tqp cmd fail, ret = %d\n", ret);
7905                 return;
7906         }
7907
7908         reset_try_times = 0;
7909         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7910                 /* Wait for tqp hw reset */
7911                 msleep(20);
7912                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7913                 if (reset_status)
7914                         break;
7915         }
7916
7917         if (!reset_status) {
7918                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7919                 return;
7920         }
7921
7922         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7923         if (ret)
7924                 dev_warn(&hdev->pdev->dev,
7925                          "Deassert the soft reset fail, ret = %d\n", ret);
7926 }
7927
7928 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7929 {
7930         struct hclge_vport *vport = hclge_get_vport(handle);
7931         struct hclge_dev *hdev = vport->back;
7932
7933         return hdev->fw_version;
7934 }
7935
7936 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7937 {
7938         struct phy_device *phydev = hdev->hw.mac.phydev;
7939
7940         if (!phydev)
7941                 return;
7942
7943         phy_set_asym_pause(phydev, rx_en, tx_en);
7944 }
7945
7946 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7947 {
7948         int ret;
7949
7950         if (rx_en && tx_en)
7951                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7952         else if (rx_en && !tx_en)
7953                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7954         else if (!rx_en && tx_en)
7955                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7956         else
7957                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7958
7959         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7960                 return 0;
7961
7962         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7963         if (ret) {
7964                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7965                         ret);
7966                 return ret;
7967         }
7968
7969         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7970
7971         return 0;
7972 }
7973
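/* Re-resolve MAC pause settings after PHY autoneg: local and remote pause
 * advertisements are combined with mii_resolve_flowctrl_fdx() and pause
 * is forced off on half duplex links. Nothing is done while the link is
 * down or autoneg is off.
 */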
7974 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7975 {
7976         struct phy_device *phydev = hdev->hw.mac.phydev;
7977         u16 remote_advertising = 0;
7978         u16 local_advertising = 0;
7979         u32 rx_pause, tx_pause;
7980         u8 flowctl;
7981
7982         if (!phydev->link || !phydev->autoneg)
7983                 return 0;
7984
7985         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7986
7987         if (phydev->pause)
7988                 remote_advertising = LPA_PAUSE_CAP;
7989
7990         if (phydev->asym_pause)
7991                 remote_advertising |= LPA_PAUSE_ASYM;
7992
7993         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7994                                            remote_advertising);
7995         tx_pause = flowctl & FLOW_CTRL_TX;
7996         rx_pause = flowctl & FLOW_CTRL_RX;
7997
7998         if (phydev->duplex == HCLGE_MAC_HALF) {
7999                 tx_pause = 0;
8000                 rx_pause = 0;
8001         }
8002
8003         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8004 }
8005
8006 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8007                                  u32 *rx_en, u32 *tx_en)
8008 {
8009         struct hclge_vport *vport = hclge_get_vport(handle);
8010         struct hclge_dev *hdev = vport->back;
8011
8012         *auto_neg = hclge_get_autoneg(handle);
8013
8014         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8015                 *rx_en = 0;
8016                 *tx_en = 0;
8017                 return;
8018         }
8019
8020         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8021                 *rx_en = 1;
8022                 *tx_en = 0;
8023         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8024                 *tx_en = 1;
8025                 *rx_en = 0;
8026         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8027                 *rx_en = 1;
8028                 *tx_en = 1;
8029         } else {
8030                 *rx_en = 0;
8031                 *tx_en = 0;
8032         }
8033 }
8034
8035 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8036                                 u32 rx_en, u32 tx_en)
8037 {
8038         struct hclge_vport *vport = hclge_get_vport(handle);
8039         struct hclge_dev *hdev = vport->back;
8040         struct phy_device *phydev = hdev->hw.mac.phydev;
8041         u32 fc_autoneg;
8042
8043         fc_autoneg = hclge_get_autoneg(handle);
8044         if (auto_neg != fc_autoneg) {
8045                 dev_info(&hdev->pdev->dev,
8046                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8047                 return -EOPNOTSUPP;
8048         }
8049
8050         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8051                 dev_info(&hdev->pdev->dev,
8052                          "Priority flow control enabled. Cannot set link flow control.\n");
8053                 return -EOPNOTSUPP;
8054         }
8055
8056         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8057
8058         if (!fc_autoneg)
8059                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8060
8061         if (phydev)
8062                 return phy_start_aneg(phydev);
8063
8064         if (hdev->pdev->revision == 0x20)
8065                 return -EOPNOTSUPP;
8066
8067         return hclge_restart_autoneg(handle);
8068 }
8069
8070 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8071                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8072 {
8073         struct hclge_vport *vport = hclge_get_vport(handle);
8074         struct hclge_dev *hdev = vport->back;
8075
8076         if (speed)
8077                 *speed = hdev->hw.mac.speed;
8078         if (duplex)
8079                 *duplex = hdev->hw.mac.duplex;
8080         if (auto_neg)
8081                 *auto_neg = hdev->hw.mac.autoneg;
8082 }
8083
8084 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8085                                  u8 *module_type)
8086 {
8087         struct hclge_vport *vport = hclge_get_vport(handle);
8088         struct hclge_dev *hdev = vport->back;
8089
8090         if (media_type)
8091                 *media_type = hdev->hw.mac.media_type;
8092
8093         if (module_type)
8094                 *module_type = hdev->hw.mac.module_type;
8095 }
8096
8097 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8098                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8099 {
8100         struct hclge_vport *vport = hclge_get_vport(handle);
8101         struct hclge_dev *hdev = vport->back;
8102         struct phy_device *phydev = hdev->hw.mac.phydev;
8103         int mdix_ctrl, mdix, retval, is_resolved;
8104
8105         if (!phydev) {
8106                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8107                 *tp_mdix = ETH_TP_MDI_INVALID;
8108                 return;
8109         }
8110
8111         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8112
8113         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8114         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8115                                     HCLGE_PHY_MDIX_CTRL_S);
8116
8117         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8118         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8119         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8120
8121         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8122
8123         switch (mdix_ctrl) {
8124         case 0x0:
8125                 *tp_mdix_ctrl = ETH_TP_MDI;
8126                 break;
8127         case 0x1:
8128                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8129                 break;
8130         case 0x3:
8131                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8132                 break;
8133         default:
8134                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8135                 break;
8136         }
8137
8138         if (!is_resolved)
8139                 *tp_mdix = ETH_TP_MDI_INVALID;
8140         else if (mdix)
8141                 *tp_mdix = ETH_TP_MDI_X;
8142         else
8143                 *tp_mdix = ETH_TP_MDI;
8144 }
8145
8146 static void hclge_info_show(struct hclge_dev *hdev)
8147 {
8148         struct device *dev = &hdev->pdev->dev;
8149
8150         dev_info(dev, "PF info begin:\n");
8151
8152         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8153         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8154         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8155         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8156         dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8157         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8158         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8159         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8160         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8161         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8162         dev_info(dev, "This is %s PF\n",
8163                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8164         dev_info(dev, "DCB %s\n",
8165                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8166         dev_info(dev, "MQPRIO %s\n",
8167                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8168
8169         dev_info(dev, "PF info end.\n");
8170 }
8171
8172 static int hclge_init_client_instance(struct hnae3_client *client,
8173                                       struct hnae3_ae_dev *ae_dev)
8174 {
8175         struct hclge_dev *hdev = ae_dev->priv;
8176         struct hclge_vport *vport;
8177         int i, ret;
8178
8179         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8180                 vport = &hdev->vport[i];
8181
8182                 switch (client->type) {
8183                 case HNAE3_CLIENT_KNIC:
8184
8185                         hdev->nic_client = client;
8186                         vport->nic.client = client;
8187                         ret = client->ops->init_instance(&vport->nic);
8188                         if (ret)
8189                                 goto clear_nic;
8190
8191                         hnae3_set_client_init_flag(client, ae_dev, 1);
8192                         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8193
8194                         if (netif_msg_drv(&hdev->vport->nic))
8195                                 hclge_info_show(hdev);
8196
8197                         if (hdev->roce_client &&
8198                             hnae3_dev_roce_supported(hdev)) {
8199                                 struct hnae3_client *rc = hdev->roce_client;
8200
8201                                 ret = hclge_init_roce_base_info(vport);
8202                                 if (ret)
8203                                         goto clear_roce;
8204
8205                                 ret = rc->ops->init_instance(&vport->roce);
8206                                 if (ret)
8207                                         goto clear_roce;
8208
8209                                 set_bit(HCLGE_STATE_ROCE_REGISTERED,
8210                                         &hdev->state);
8211                                 hnae3_set_client_init_flag(hdev->roce_client,
8212                                                            ae_dev, 1);
8213                         }
8214
8215                         break;
8216                 case HNAE3_CLIENT_UNIC:
8217                         hdev->nic_client = client;
8218                         vport->nic.client = client;
8219
8220                         ret = client->ops->init_instance(&vport->nic);
8221                         if (ret)
8222                                 goto clear_nic;
8223
8224                         hnae3_set_client_init_flag(client, ae_dev, 1);
8225
8226                         break;
8227                 case HNAE3_CLIENT_ROCE:
8228                         if (hnae3_dev_roce_supported(hdev)) {
8229                                 hdev->roce_client = client;
8230                                 vport->roce.client = client;
8231                         }
8232
8233                         if (hdev->roce_client && hdev->nic_client) {
8234                                 ret = hclge_init_roce_base_info(vport);
8235                                 if (ret)
8236                                         goto clear_roce;
8237
8238                                 ret = client->ops->init_instance(&vport->roce);
8239                                 if (ret)
8240                                         goto clear_roce;
8241
8242                                 set_bit(HCLGE_STATE_ROCE_REGISTERED,
8243                                         &hdev->state);
8244                                 hnae3_set_client_init_flag(client, ae_dev, 1);
8245                         }
8246
8247                         break;
8248                 default:
8249                         return -EINVAL;
8250                 }
8251         }
8252
8253         return 0;
8254
8255 clear_nic:
8256         hdev->nic_client = NULL;
8257         vport->nic.client = NULL;
8258         return ret;
8259 clear_roce:
8260         hdev->roce_client = NULL;
8261         vport->roce.client = NULL;
8262         return ret;
8263 }
8264
8265 static void hclge_uninit_client_instance(struct hnae3_client *client,
8266                                          struct hnae3_ae_dev *ae_dev)
8267 {
8268         struct hclge_dev *hdev = ae_dev->priv;
8269         struct hclge_vport *vport;
8270         int i;
8271
8272         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8273                 vport = &hdev->vport[i];
8274                 if (hdev->roce_client) {
8275                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8276                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8277                                                                 0);
8278                         hdev->roce_client = NULL;
8279                         vport->roce.client = NULL;
8280                 }
8281                 if (client->type == HNAE3_CLIENT_ROCE)
8282                         return;
8283                 if (hdev->nic_client && client->ops->uninit_instance) {
8284                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8285                         client->ops->uninit_instance(&vport->nic, 0);
8286                         hdev->nic_client = NULL;
8287                         vport->nic.client = NULL;
8288                 }
8289         }
8290 }
8291
8292 static int hclge_pci_init(struct hclge_dev *hdev)
8293 {
8294         struct pci_dev *pdev = hdev->pdev;
8295         struct hclge_hw *hw;
8296         int ret;
8297
8298         ret = pci_enable_device(pdev);
8299         if (ret) {
8300                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8301                 return ret;
8302         }
8303
8304         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8305         if (ret) {
8306                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8307                 if (ret) {
8308                         dev_err(&pdev->dev,
8309                                 "can't set consistent PCI DMA\n");
8310                         goto err_disable_device;
8311                 }
8312                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8313         }
8314
8315         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8316         if (ret) {
8317                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8318                 goto err_disable_device;
8319         }
8320
8321         pci_set_master(pdev);
8322         hw = &hdev->hw;
8323         hw->io_base = pcim_iomap(pdev, 2, 0);
8324         if (!hw->io_base) {
8325                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8326                 ret = -ENOMEM;
8327                 goto err_clr_master;
8328         }
8329
8330         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8331
8332         return 0;
8333 err_clr_master:
8334         pci_clear_master(pdev);
8335         pci_release_regions(pdev);
8336 err_disable_device:
8337         pci_disable_device(pdev);
8338
8339         return ret;
8340 }
8341
8342 static void hclge_pci_uninit(struct hclge_dev *hdev)
8343 {
8344         struct pci_dev *pdev = hdev->pdev;
8345
8346         pcim_iounmap(pdev, hdev->hw.io_base);
8347         pci_free_irq_vectors(pdev);
8348         pci_clear_master(pdev);
8349         pci_release_mem_regions(pdev);
8350         pci_disable_device(pdev);
8351 }
8352
8353 static void hclge_state_init(struct hclge_dev *hdev)
8354 {
8355         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8356         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8357         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8358         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8359         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8360         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8361 }
8362
8363 static void hclge_state_uninit(struct hclge_dev *hdev)
8364 {
8365         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8366
8367         if (hdev->service_timer.function)
8368                 del_timer_sync(&hdev->service_timer);
8369         if (hdev->reset_timer.function)
8370                 del_timer_sync(&hdev->reset_timer);
8371         if (hdev->service_task.func)
8372                 cancel_work_sync(&hdev->service_task);
8373         if (hdev->rst_service_task.func)
8374                 cancel_work_sync(&hdev->rst_service_task);
8375         if (hdev->mbx_service_task.func)
8376                 cancel_work_sync(&hdev->mbx_service_task);
8377 }
8378
8379 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8380 {
8381 #define HCLGE_FLR_WAIT_MS       100
8382 #define HCLGE_FLR_WAIT_CNT      50
8383         struct hclge_dev *hdev = ae_dev->priv;
8384         int cnt = 0;
8385
8386         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8387         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8388         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8389         hclge_reset_event(hdev->pdev, NULL);
8390
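        /* Wait up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds) for
         * the reset handler to bring the function down and set HNAE3_FLR_DOWN.
         */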
8391         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8392                cnt++ < HCLGE_FLR_WAIT_CNT)
8393                 msleep(HCLGE_FLR_WAIT_MS);
8394
8395         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8396                 dev_err(&hdev->pdev->dev,
8397                         "flr wait down timeout: %d\n", cnt);
8398 }
8399
8400 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8401 {
8402         struct hclge_dev *hdev = ae_dev->priv;
8403
8404         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8405 }
8406
8407 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8408 {
8409         struct pci_dev *pdev = ae_dev->pdev;
8410         struct hclge_dev *hdev;
8411         int ret;
8412
8413         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8414         if (!hdev) {
8415                 ret = -ENOMEM;
8416                 goto out;
8417         }
8418
8419         hdev->pdev = pdev;
8420         hdev->ae_dev = ae_dev;
8421         hdev->reset_type = HNAE3_NONE_RESET;
8422         hdev->reset_level = HNAE3_FUNC_RESET;
8423         ae_dev->priv = hdev;
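        /* default MPS: a standard Ethernet frame plus FCS, with room for two
         * VLAN tags (QinQ)
         */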
8424         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8425
8426         mutex_init(&hdev->vport_lock);
8427         mutex_init(&hdev->vport_cfg_mutex);
8428         spin_lock_init(&hdev->fd_rule_lock);
8429
8430         ret = hclge_pci_init(hdev);
8431         if (ret) {
8432                 dev_err(&pdev->dev, "PCI init failed\n");
8433                 goto out;
8434         }
8435
8436         /* Firmware command queue initialize */
8437         ret = hclge_cmd_queue_init(hdev);
8438         if (ret) {
8439                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8440                 goto err_pci_uninit;
8441         }
8442
8443         /* Firmware command initialize */
8444         ret = hclge_cmd_init(hdev);
8445         if (ret)
8446                 goto err_cmd_uninit;
8447
8448         ret = hclge_get_cap(hdev);
8449         if (ret) {
8450                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8451                         ret);
8452                 goto err_cmd_uninit;
8453         }
8454
8455         ret = hclge_configure(hdev);
8456         if (ret) {
8457                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8458                 goto err_cmd_uninit;
8459         }
8460
8461         ret = hclge_init_msi(hdev);
8462         if (ret) {
8463                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8464                 goto err_cmd_uninit;
8465         }
8466
8467         ret = hclge_misc_irq_init(hdev);
8468         if (ret) {
8469                 dev_err(&pdev->dev,
8470                         "Misc IRQ(vector0) init error, ret = %d.\n",
8471                         ret);
8472                 goto err_msi_uninit;
8473         }
8474
8475         ret = hclge_alloc_tqps(hdev);
8476         if (ret) {
8477                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8478                 goto err_msi_irq_uninit;
8479         }
8480
8481         ret = hclge_alloc_vport(hdev);
8482         if (ret) {
8483                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8484                 goto err_msi_irq_uninit;
8485         }
8486
8487         ret = hclge_map_tqp(hdev);
8488         if (ret) {
8489                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8490                 goto err_msi_irq_uninit;
8491         }
8492
8493         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8494                 ret = hclge_mac_mdio_config(hdev);
8495                 if (ret) {
8496                         dev_err(&hdev->pdev->dev,
8497                                 "mdio config fail ret=%d\n", ret);
8498                         goto err_msi_irq_uninit;
8499                 }
8500         }
8501
8502         ret = hclge_init_umv_space(hdev);
8503         if (ret) {
8504                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8505                 goto err_mdiobus_unreg;
8506         }
8507
8508         ret = hclge_mac_init(hdev);
8509         if (ret) {
8510                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8511                 goto err_mdiobus_unreg;
8512         }
8513
8514         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8515         if (ret) {
8516                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8517                 goto err_mdiobus_unreg;
8518         }
8519
8520         ret = hclge_config_gro(hdev, true);
8521         if (ret)
8522                 goto err_mdiobus_unreg;
8523
8524         ret = hclge_init_vlan_config(hdev);
8525         if (ret) {
8526                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8527                 goto err_mdiobus_unreg;
8528         }
8529
8530         ret = hclge_tm_schd_init(hdev);
8531         if (ret) {
8532                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8533                 goto err_mdiobus_unreg;
8534         }
8535
8536         hclge_rss_init_cfg(hdev);
8537         ret = hclge_rss_init_hw(hdev);
8538         if (ret) {
8539                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8540                 goto err_mdiobus_unreg;
8541         }
8542
8543         ret = init_mgr_tbl(hdev);
8544         if (ret) {
8545                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8546                 goto err_mdiobus_unreg;
8547         }
8548
8549         ret = hclge_init_fd_config(hdev);
8550         if (ret) {
8551                 dev_err(&pdev->dev,
8552                         "fd table init fail, ret=%d\n", ret);
8553                 goto err_mdiobus_unreg;
8554         }
8555
8556         ret = hclge_hw_error_set_state(hdev, true);
8557         if (ret) {
8558                 dev_err(&pdev->dev,
8559                         "fail(%d) to enable hw error interrupts\n", ret);
8560                 goto err_mdiobus_unreg;
8561         }
8562
8563         INIT_KFIFO(hdev->mac_tnl_log);
8564
8565         hclge_dcb_ops_set(hdev);
8566
8567         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8568         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8569         INIT_WORK(&hdev->service_task, hclge_service_task);
8570         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8571         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8572
8573         hclge_clear_all_event_cause(hdev);
8574
8575         /* Enable MISC vector(vector0) */
8576         hclge_enable_vector(&hdev->misc_vector, true);
8577
8578         hclge_state_init(hdev);
8579         hdev->last_reset_time = jiffies;
8580
8581         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8582         return 0;
8583
8584 err_mdiobus_unreg:
8585         if (hdev->hw.mac.phydev)
8586                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8587 err_msi_irq_uninit:
8588         hclge_misc_irq_uninit(hdev);
8589 err_msi_uninit:
8590         pci_free_irq_vectors(pdev);
8591 err_cmd_uninit:
8592         hclge_cmd_uninit(hdev);
8593 err_pci_uninit:
8594         pcim_iounmap(pdev, hdev->hw.io_base);
8595         pci_clear_master(pdev);
8596         pci_release_regions(pdev);
8597         pci_disable_device(pdev);
8598 out:
8599         return ret;
8600 }
8601
8602 static void hclge_stats_clear(struct hclge_dev *hdev)
8603 {
8604         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8605 }
8606
8607 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8608 {
8609         struct hclge_vport *vport = hdev->vport;
8610         int i;
8611
8612         for (i = 0; i < hdev->num_alloc_vport; i++) {
8613                 hclge_vport_stop(vport);
8614                 vport++;
8615         }
8616 }
8617
8618 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8619 {
8620         struct hclge_dev *hdev = ae_dev->priv;
8621         struct pci_dev *pdev = ae_dev->pdev;
8622         int ret;
8623
8624         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8625
8626         hclge_stats_clear(hdev);
8627         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8628
8629         ret = hclge_cmd_init(hdev);
8630         if (ret) {
8631                 dev_err(&pdev->dev, "Cmd init failed, ret = %d\n", ret);
8632                 return ret;
8633         }
8634
8635         ret = hclge_map_tqp(hdev);
8636         if (ret) {
8637                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8638                 return ret;
8639         }
8640
8641         hclge_reset_umv_space(hdev);
8642
8643         ret = hclge_mac_init(hdev);
8644         if (ret) {
8645                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8646                 return ret;
8647         }
8648
8649         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8650         if (ret) {
8651                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8652                 return ret;
8653         }
8654
8655         ret = hclge_config_gro(hdev, true);
8656         if (ret)
8657                 return ret;
8658
8659         ret = hclge_init_vlan_config(hdev);
8660         if (ret) {
8661                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8662                 return ret;
8663         }
8664
8665         ret = hclge_tm_init_hw(hdev, true);
8666         if (ret) {
8667                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8668                 return ret;
8669         }
8670
8671         ret = hclge_rss_init_hw(hdev);
8672         if (ret) {
8673                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8674                 return ret;
8675         }
8676
8677         ret = hclge_init_fd_config(hdev);
8678         if (ret) {
8679                 dev_err(&pdev->dev,
8680                         "fd table init fail, ret=%d\n", ret);
8681                 return ret;
8682         }
8683
8684         /* Re-enable the hw error interrupts because
8685          * the interrupts get disabled on core/global reset.
8686          */
8687         ret = hclge_hw_error_set_state(hdev, true);
8688         if (ret) {
8689                 dev_err(&pdev->dev,
8690                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8691                 return ret;
8692         }
8693
8694         hclge_reset_vport_state(hdev);
8695
8696         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8697                  HCLGE_DRIVER_NAME);
8698
8699         return 0;
8700 }
8701
8702 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8703 {
8704         struct hclge_dev *hdev = ae_dev->priv;
8705         struct hclge_mac *mac = &hdev->hw.mac;
8706
8707         hclge_state_uninit(hdev);
8708
8709         if (mac->phydev)
8710                 mdiobus_unregister(mac->mdio_bus);
8711
8712         hclge_uninit_umv_space(hdev);
8713
8714         /* Disable MISC vector(vector0) */
8715         hclge_enable_vector(&hdev->misc_vector, false);
8716         synchronize_irq(hdev->misc_vector.vector_irq);
8717
8718         hclge_config_mac_tnl_int(hdev, false);
8719         hclge_hw_error_set_state(hdev, false);
8720         hclge_cmd_uninit(hdev);
8721         hclge_misc_irq_uninit(hdev);
8722         hclge_pci_uninit(hdev);
8723         mutex_destroy(&hdev->vport_lock);
8724         hclge_uninit_vport_mac_table(hdev);
8725         hclge_uninit_vport_vlan_table(hdev);
8726         mutex_destroy(&hdev->vport_cfg_mutex);
8727         ae_dev->priv = NULL;
8728 }
8729
8730 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8731 {
8732         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8733         struct hclge_vport *vport = hclge_get_vport(handle);
8734         struct hclge_dev *hdev = vport->back;
8735
8736         return min_t(u32, hdev->rss_size_max,
8737                      vport->alloc_tqps / kinfo->num_tc);
8738 }
8739
8740 static void hclge_get_channels(struct hnae3_handle *handle,
8741                                struct ethtool_channels *ch)
8742 {
8743         ch->max_combined = hclge_get_max_channels(handle);
8744         ch->other_count = 1;
8745         ch->max_other = 1;
8746         ch->combined_count = handle->kinfo.rss_size;
8747 }
8748
8749 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8750                                         u16 *alloc_tqps, u16 *max_rss_size)
8751 {
8752         struct hclge_vport *vport = hclge_get_vport(handle);
8753         struct hclge_dev *hdev = vport->back;
8754
8755         *alloc_tqps = vport->alloc_tqps;
8756         *max_rss_size = hdev->rss_size_max;
8757 }
8758
8759 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8760                               bool rxfh_configured)
8761 {
8762         struct hclge_vport *vport = hclge_get_vport(handle);
8763         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8764         struct hclge_dev *hdev = vport->back;
8765         int cur_rss_size = kinfo->rss_size;
8766         int cur_tqps = kinfo->num_tqps;
8767         u16 tc_offset[HCLGE_MAX_TC_NUM];
8768         u16 tc_valid[HCLGE_MAX_TC_NUM];
8769         u16 tc_size[HCLGE_MAX_TC_NUM];
8770         u16 roundup_size;
8771         u32 *rss_indir;
8772         int ret, i;
8773
8774         kinfo->req_rss_size = new_tqps_num;
8775
8776         ret = hclge_tm_vport_map_update(hdev);
8777         if (ret) {
8778                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8779                 return ret;
8780         }
8781
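        /* The hardware takes the per-TC queue count as a power-of-two
         * exponent, so round rss_size up and use its log2.
         */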
8782         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8783         roundup_size = ilog2(roundup_size);
8784         /* Set the RSS TC mode according to the new RSS size */
8785         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8786                 tc_valid[i] = 0;
8787
8788                 if (!(hdev->hw_tc_map & BIT(i)))
8789                         continue;
8790
8791                 tc_valid[i] = 1;
8792                 tc_size[i] = roundup_size;
8793                 tc_offset[i] = kinfo->rss_size * i;
8794         }
8795         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8796         if (ret)
8797                 return ret;
8798
8799         /* RSS indirection table has been configured by user */
8800         if (rxfh_configured)
8801                 goto out;
8802
8803         /* Reinitialize the RSS indirection table according to the new RSS size */
8804         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8805         if (!rss_indir)
8806                 return -ENOMEM;
8807
8808         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8809                 rss_indir[i] = i % kinfo->rss_size;
8810
8811         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8812         if (ret)
8813                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8814                         ret);
8815
8816         kfree(rss_indir);
8817
8818 out:
8819         if (!ret)
8820                 dev_info(&hdev->pdev->dev,
8821                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8822                          cur_rss_size, kinfo->rss_size,
8823                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8824
8825         return ret;
8826 }
8827
8828 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8829                               u32 *regs_num_64_bit)
8830 {
8831         struct hclge_desc desc;
8832         u32 total_num;
8833         int ret;
8834
8835         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8836         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8837         if (ret) {
8838                 dev_err(&hdev->pdev->dev,
8839                         "Query register number cmd failed, ret = %d.\n", ret);
8840                 return ret;
8841         }
8842
8843         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8844         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8845
8846         total_num = *regs_num_32_bit + *regs_num_64_bit;
8847         if (!total_num)
8848                 return -EINVAL;
8849
8850         return 0;
8851 }
8852
8853 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8854                                  void *data)
8855 {
8856 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8857
8858         struct hclge_desc *desc;
8859         u32 *reg_val = data;
8860         __le32 *desc_data;
8861         int cmd_num;
8862         int i, k, n;
8863         int ret;
8864
8865         if (regs_num == 0)
8866                 return 0;
8867
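        /* The first descriptor carries the command header, so it holds two
         * fewer 32-bit values than the following descriptors; the "+ 2"
         * accounts for that when sizing the descriptor array.
         */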
8868         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8869         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8870         if (!desc)
8871                 return -ENOMEM;
8872
8873         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8874         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8875         if (ret) {
8876                 dev_err(&hdev->pdev->dev,
8877                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8878                 kfree(desc);
8879                 return ret;
8880         }
8881
8882         for (i = 0; i < cmd_num; i++) {
8883                 if (i == 0) {
8884                         desc_data = (__le32 *)(&desc[i].data[0]);
8885                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8886                 } else {
8887                         desc_data = (__le32 *)(&desc[i]);
8888                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8889                 }
8890                 for (k = 0; k < n; k++) {
8891                         *reg_val++ = le32_to_cpu(*desc_data++);
8892
8893                         regs_num--;
8894                         if (!regs_num)
8895                                 break;
8896                 }
8897         }
8898
8899         kfree(desc);
8900         return 0;
8901 }
8902
8903 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8904                                  void *data)
8905 {
8906 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8907
8908         struct hclge_desc *desc;
8909         u64 *reg_val = data;
8910         __le64 *desc_data;
8911         int cmd_num;
8912         int i, k, n;
8913         int ret;
8914
8915         if (regs_num == 0)
8916                 return 0;
8917
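        /* The command header occupies one 64-bit slot of the first
         * descriptor, hence the "+ 1" when sizing the descriptor array.
         */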
8918         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8919         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8920         if (!desc)
8921                 return -ENOMEM;
8922
8923         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8924         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8925         if (ret) {
8926                 dev_err(&hdev->pdev->dev,
8927                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8928                 kfree(desc);
8929                 return ret;
8930         }
8931
8932         for (i = 0; i < cmd_num; i++) {
8933                 if (i == 0) {
8934                         desc_data = (__le64 *)(&desc[i].data[0]);
8935                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8936                 } else {
8937                         desc_data = (__le64 *)(&desc[i]);
8938                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8939                 }
8940                 for (k = 0; k < n; k++) {
8941                         *reg_val++ = le64_to_cpu(*desc_data++);
8942
8943                         regs_num--;
8944                         if (!regs_num)
8945                                 break;
8946                 }
8947         }
8948
8949         kfree(desc);
8950         return 0;
8951 }
8952
8953 #define MAX_SEPARATE_NUM        4
8954 #define SEPARATOR_VALUE         0xFFFFFFFF
8955 #define REG_NUM_PER_LINE        4
8956 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8957
8958 static int hclge_get_regs_len(struct hnae3_handle *handle)
8959 {
8960         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8961         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8962         struct hclge_vport *vport = hclge_get_vport(handle);
8963         struct hclge_dev *hdev = vport->back;
8964         u32 regs_num_32_bit, regs_num_64_bit;
8965         int ret;
8966
8967         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8968         if (ret) {
8969                 dev_err(&hdev->pdev->dev,
8970                         "Get register number failed, ret = %d.\n", ret);
8971                 return -EOPNOTSUPP;
8972         }
8973
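        /* Each register block is padded with SEPARATOR_VALUE words, so one
         * extra line per block is reserved beyond the raw register count.
         */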
8974         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8975         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8976         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8977         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8978
8979         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8980                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8981                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8982 }
8983
8984 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8985                            void *data)
8986 {
8987         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8988         struct hclge_vport *vport = hclge_get_vport(handle);
8989         struct hclge_dev *hdev = vport->back;
8990         u32 regs_num_32_bit, regs_num_64_bit;
8991         int i, j, reg_um, separator_num;
8992         u32 *reg = data;
8993         int ret;
8994
8995         *version = hdev->fw_version;
8996
8997         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8998         if (ret) {
8999                 dev_err(&hdev->pdev->dev,
9000                         "Get register number failed, ret = %d.\n", ret);
9001                 return;
9002         }
9003
9004         /* fetching per-PF register values from PF PCIe register space */
9005         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9006         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9007         for (i = 0; i < reg_um; i++)
9008                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9009         for (i = 0; i < separator_num; i++)
9010                 *reg++ = SEPARATOR_VALUE;
9011
9012         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9013         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9014         for (i = 0; i < reg_um; i++)
9015                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9016         for (i = 0; i < separator_num; i++)
9017                 *reg++ = SEPARATOR_VALUE;
9018
9019         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9020         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9021         for (j = 0; j < kinfo->num_tqps; j++) {
9022                 for (i = 0; i < reg_um; i++)
9023                         *reg++ = hclge_read_dev(&hdev->hw,
9024                                                 ring_reg_addr_list[i] +
9025                                                 0x200 * j);
9026                 for (i = 0; i < separator_num; i++)
9027                         *reg++ = SEPARATOR_VALUE;
9028         }
9029
9030         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9031         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9032         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9033                 for (i = 0; i < reg_um; i++)
9034                         *reg++ = hclge_read_dev(&hdev->hw,
9035                                                 tqp_intr_reg_addr_list[i] +
9036                                                 4 * j);
9037                 for (i = 0; i < separator_num; i++)
9038                         *reg++ = SEPARATOR_VALUE;
9039         }
9040
9041         /* fetching PF common register values from firmware */
9042         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9043         if (ret) {
9044                 dev_err(&hdev->pdev->dev,
9045                         "Get 32 bit register failed, ret = %d.\n", ret);
9046                 return;
9047         }
9048
9049         reg += regs_num_32_bit;
9050         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9051         if (ret)
9052                 dev_err(&hdev->pdev->dev,
9053                         "Get 64 bit register failed, ret = %d.\n", ret);
9054 }
9055
9056 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9057 {
9058         struct hclge_set_led_state_cmd *req;
9059         struct hclge_desc desc;
9060         int ret;
9061
9062         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9063
9064         req = (struct hclge_set_led_state_cmd *)desc.data;
9065         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9066                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9067
9068         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9069         if (ret)
9070                 dev_err(&hdev->pdev->dev,
9071                         "Send set led state cmd error, ret =%d\n", ret);
9072
9073         return ret;
9074 }
9075
9076 enum hclge_led_status {
9077         HCLGE_LED_OFF,
9078         HCLGE_LED_ON,
9079         HCLGE_LED_NO_CHANGE = 0xFF,
9080 };
9081
9082 static int hclge_set_led_id(struct hnae3_handle *handle,
9083                             enum ethtool_phys_id_state status)
9084 {
9085         struct hclge_vport *vport = hclge_get_vport(handle);
9086         struct hclge_dev *hdev = vport->back;
9087
9088         switch (status) {
9089         case ETHTOOL_ID_ACTIVE:
9090                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9091         case ETHTOOL_ID_INACTIVE:
9092                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9093         default:
9094                 return -EINVAL;
9095         }
9096 }
9097
9098 static void hclge_get_link_mode(struct hnae3_handle *handle,
9099                                 unsigned long *supported,
9100                                 unsigned long *advertising)
9101 {
9102         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9103         struct hclge_vport *vport = hclge_get_vport(handle);
9104         struct hclge_dev *hdev = vport->back;
9105         unsigned int idx = 0;
9106
9107         for (; idx < size; idx++) {
9108                 supported[idx] = hdev->hw.mac.supported[idx];
9109                 advertising[idx] = hdev->hw.mac.advertising[idx];
9110         }
9111 }
9112
9113 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9114 {
9115         struct hclge_vport *vport = hclge_get_vport(handle);
9116         struct hclge_dev *hdev = vport->back;
9117
9118         return hclge_config_gro(hdev, enable);
9119 }
9120
9121 static const struct hnae3_ae_ops hclge_ops = {
9122         .init_ae_dev = hclge_init_ae_dev,
9123         .uninit_ae_dev = hclge_uninit_ae_dev,
9124         .flr_prepare = hclge_flr_prepare,
9125         .flr_done = hclge_flr_done,
9126         .init_client_instance = hclge_init_client_instance,
9127         .uninit_client_instance = hclge_uninit_client_instance,
9128         .map_ring_to_vector = hclge_map_ring_to_vector,
9129         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9130         .get_vector = hclge_get_vector,
9131         .put_vector = hclge_put_vector,
9132         .set_promisc_mode = hclge_set_promisc_mode,
9133         .set_loopback = hclge_set_loopback,
9134         .start = hclge_ae_start,
9135         .stop = hclge_ae_stop,
9136         .client_start = hclge_client_start,
9137         .client_stop = hclge_client_stop,
9138         .get_status = hclge_get_status,
9139         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9140         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9141         .get_media_type = hclge_get_media_type,
9142         .check_port_speed = hclge_check_port_speed,
9143         .get_fec = hclge_get_fec,
9144         .set_fec = hclge_set_fec,
9145         .get_rss_key_size = hclge_get_rss_key_size,
9146         .get_rss_indir_size = hclge_get_rss_indir_size,
9147         .get_rss = hclge_get_rss,
9148         .set_rss = hclge_set_rss,
9149         .set_rss_tuple = hclge_set_rss_tuple,
9150         .get_rss_tuple = hclge_get_rss_tuple,
9151         .get_tc_size = hclge_get_tc_size,
9152         .get_mac_addr = hclge_get_mac_addr,
9153         .set_mac_addr = hclge_set_mac_addr,
9154         .do_ioctl = hclge_do_ioctl,
9155         .add_uc_addr = hclge_add_uc_addr,
9156         .rm_uc_addr = hclge_rm_uc_addr,
9157         .add_mc_addr = hclge_add_mc_addr,
9158         .rm_mc_addr = hclge_rm_mc_addr,
9159         .set_autoneg = hclge_set_autoneg,
9160         .get_autoneg = hclge_get_autoneg,
9161         .restart_autoneg = hclge_restart_autoneg,
9162         .get_pauseparam = hclge_get_pauseparam,
9163         .set_pauseparam = hclge_set_pauseparam,
9164         .set_mtu = hclge_set_mtu,
9165         .reset_queue = hclge_reset_tqp,
9166         .get_stats = hclge_get_stats,
9167         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9168         .update_stats = hclge_update_stats,
9169         .get_strings = hclge_get_strings,
9170         .get_sset_count = hclge_get_sset_count,
9171         .get_fw_version = hclge_get_fw_version,
9172         .get_mdix_mode = hclge_get_mdix_mode,
9173         .enable_vlan_filter = hclge_enable_vlan_filter,
9174         .set_vlan_filter = hclge_set_vlan_filter,
9175         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9176         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9177         .reset_event = hclge_reset_event,
9178         .set_default_reset_request = hclge_set_def_reset_request,
9179         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9180         .set_channels = hclge_set_channels,
9181         .get_channels = hclge_get_channels,
9182         .get_regs_len = hclge_get_regs_len,
9183         .get_regs = hclge_get_regs,
9184         .set_led_id = hclge_set_led_id,
9185         .get_link_mode = hclge_get_link_mode,
9186         .add_fd_entry = hclge_add_fd_entry,
9187         .del_fd_entry = hclge_del_fd_entry,
9188         .del_all_fd_entries = hclge_del_all_fd_entries,
9189         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9190         .get_fd_rule_info = hclge_get_fd_rule_info,
9191         .get_fd_all_rules = hclge_get_all_rules,
9192         .restore_fd_rules = hclge_restore_fd_entries,
9193         .enable_fd = hclge_enable_fd,
9194         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9195         .dbg_run_cmd = hclge_dbg_run_cmd,
9196         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9197         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9198         .ae_dev_resetting = hclge_ae_dev_resetting,
9199         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9200         .set_gro_en = hclge_gro_en,
9201         .get_global_queue_id = hclge_covert_handle_qid_global,
9202         .set_timer_task = hclge_set_timer_task,
9203         .mac_connect_phy = hclge_mac_connect_phy,
9204         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9205 };
9206
9207 static struct hnae3_ae_algo ae_algo = {
9208         .ops = &hclge_ops,
9209         .pdev_id_table = ae_algo_pci_tbl,
9210 };
9211
9212 static int hclge_init(void)
9213 {
9214         pr_info("%s is initializing\n", HCLGE_NAME);
9215
9216         hnae3_register_ae_algo(&ae_algo);
9217
9218         return 0;
9219 }
9220
9221 static void hclge_exit(void)
9222 {
9223         hnae3_unregister_ae_algo(&ae_algo);
9224 }
9225 module_init(hclge_init);
9226 module_exit(hclge_exit);
9227
9228 MODULE_LICENSE("GPL");
9229 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9230 MODULE_DESCRIPTION("HCLGE Driver");
9231 MODULE_VERSION(HCLGE_MOD_VERSION);