drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hclge_err.h"
23 #include "hnae3.h"
24
25 #define HCLGE_NAME                      "hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
28
29 #define HCLGE_BUF_SIZE_UNIT     256
30
31 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
32 static int hclge_init_vlan_config(struct hclge_dev *hdev);
33 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
34 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
35                                u16 *allocated_size, bool is_alloc);
36
37 static struct hnae3_ae_algo ae_algo;
38
39 static const struct pci_device_id ae_algo_pci_tbl[] = {
40         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
41         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
47         /* required last entry */
48         {0, }
49 };
50
51 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
52
53 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
54                                          HCLGE_CMDQ_TX_ADDR_H_REG,
55                                          HCLGE_CMDQ_TX_DEPTH_REG,
56                                          HCLGE_CMDQ_TX_TAIL_REG,
57                                          HCLGE_CMDQ_TX_HEAD_REG,
58                                          HCLGE_CMDQ_RX_ADDR_L_REG,
59                                          HCLGE_CMDQ_RX_ADDR_H_REG,
60                                          HCLGE_CMDQ_RX_DEPTH_REG,
61                                          HCLGE_CMDQ_RX_TAIL_REG,
62                                          HCLGE_CMDQ_RX_HEAD_REG,
63                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
64                                          HCLGE_CMDQ_INTR_STS_REG,
65                                          HCLGE_CMDQ_INTR_EN_REG,
66                                          HCLGE_CMDQ_INTR_GEN_REG};
67
68 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
69                                            HCLGE_VECTOR0_OTER_EN_REG,
70                                            HCLGE_MISC_RESET_STS_REG,
71                                            HCLGE_MISC_VECTOR_INT_STS,
72                                            HCLGE_GLOBAL_RESET_REG,
73                                            HCLGE_FUN_RST_ING,
74                                            HCLGE_GRO_EN_REG};
75
76 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
77                                          HCLGE_RING_RX_ADDR_H_REG,
78                                          HCLGE_RING_RX_BD_NUM_REG,
79                                          HCLGE_RING_RX_BD_LENGTH_REG,
80                                          HCLGE_RING_RX_MERGE_EN_REG,
81                                          HCLGE_RING_RX_TAIL_REG,
82                                          HCLGE_RING_RX_HEAD_REG,
83                                          HCLGE_RING_RX_FBD_NUM_REG,
84                                          HCLGE_RING_RX_OFFSET_REG,
85                                          HCLGE_RING_RX_FBD_OFFSET_REG,
86                                          HCLGE_RING_RX_STASH_REG,
87                                          HCLGE_RING_RX_BD_ERR_REG,
88                                          HCLGE_RING_TX_ADDR_L_REG,
89                                          HCLGE_RING_TX_ADDR_H_REG,
90                                          HCLGE_RING_TX_BD_NUM_REG,
91                                          HCLGE_RING_TX_PRIORITY_REG,
92                                          HCLGE_RING_TX_TC_REG,
93                                          HCLGE_RING_TX_MERGE_EN_REG,
94                                          HCLGE_RING_TX_TAIL_REG,
95                                          HCLGE_RING_TX_HEAD_REG,
96                                          HCLGE_RING_TX_FBD_NUM_REG,
97                                          HCLGE_RING_TX_OFFSET_REG,
98                                          HCLGE_RING_TX_EBD_NUM_REG,
99                                          HCLGE_RING_TX_EBD_OFFSET_REG,
100                                          HCLGE_RING_TX_BD_ERR_REG,
101                                          HCLGE_RING_EN_REG};
102
103 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
104                                              HCLGE_TQP_INTR_GL0_REG,
105                                              HCLGE_TQP_INTR_GL1_REG,
106                                              HCLGE_TQP_INTR_GL2_REG,
107                                              HCLGE_TQP_INTR_RL_REG};
108
109 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
110         "App    Loopback test",
111         "Serdes serial Loopback test",
112         "Serdes parallel Loopback test",
113         "Phy    Loopback test"
114 };
115
116 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
117         {"mac_tx_mac_pause_num",
118                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
119         {"mac_rx_mac_pause_num",
120                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
121         {"mac_tx_control_pkt_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
123         {"mac_rx_control_pkt_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
125         {"mac_tx_pfc_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
127         {"mac_tx_pfc_pri0_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
129         {"mac_tx_pfc_pri1_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
131         {"mac_tx_pfc_pri2_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
133         {"mac_tx_pfc_pri3_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
135         {"mac_tx_pfc_pri4_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
137         {"mac_tx_pfc_pri5_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
139         {"mac_tx_pfc_pri6_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
141         {"mac_tx_pfc_pri7_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
143         {"mac_rx_pfc_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
145         {"mac_rx_pfc_pri0_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
147         {"mac_rx_pfc_pri1_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
149         {"mac_rx_pfc_pri2_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
151         {"mac_rx_pfc_pri3_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
153         {"mac_rx_pfc_pri4_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
155         {"mac_rx_pfc_pri5_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
157         {"mac_rx_pfc_pri6_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
159         {"mac_rx_pfc_pri7_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
161         {"mac_tx_total_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
163         {"mac_tx_total_oct_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
165         {"mac_tx_good_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
167         {"mac_tx_bad_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
169         {"mac_tx_good_oct_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
171         {"mac_tx_bad_oct_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
173         {"mac_tx_uni_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
175         {"mac_tx_multi_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
177         {"mac_tx_broad_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
179         {"mac_tx_undersize_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
181         {"mac_tx_oversize_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
183         {"mac_tx_64_oct_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
185         {"mac_tx_65_127_oct_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
187         {"mac_tx_128_255_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
189         {"mac_tx_256_511_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
191         {"mac_tx_512_1023_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
193         {"mac_tx_1024_1518_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
195         {"mac_tx_1519_2047_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
197         {"mac_tx_2048_4095_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
199         {"mac_tx_4096_8191_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
201         {"mac_tx_8192_9216_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
203         {"mac_tx_9217_12287_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
205         {"mac_tx_12288_16383_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
207         {"mac_tx_1519_max_good_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
209         {"mac_tx_1519_max_bad_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
211         {"mac_rx_total_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
213         {"mac_rx_total_oct_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
215         {"mac_rx_good_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
217         {"mac_rx_bad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
219         {"mac_rx_good_oct_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
221         {"mac_rx_bad_oct_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
223         {"mac_rx_uni_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
225         {"mac_rx_multi_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
227         {"mac_rx_broad_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
229         {"mac_rx_undersize_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
231         {"mac_rx_oversize_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
233         {"mac_rx_64_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
235         {"mac_rx_65_127_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
237         {"mac_rx_128_255_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
239         {"mac_rx_256_511_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
241         {"mac_rx_512_1023_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
243         {"mac_rx_1024_1518_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
245         {"mac_rx_1519_2047_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
247         {"mac_rx_2048_4095_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
249         {"mac_rx_4096_8191_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
251         {"mac_rx_8192_9216_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
253         {"mac_rx_9217_12287_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
255         {"mac_rx_12288_16383_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
257         {"mac_rx_1519_max_good_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
259         {"mac_rx_1519_max_bad_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
261
262         {"mac_tx_fragment_pkt_num",
263                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
264         {"mac_tx_undermin_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
266         {"mac_tx_jabber_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
268         {"mac_tx_err_all_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
270         {"mac_tx_from_app_good_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
272         {"mac_tx_from_app_bad_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
274         {"mac_rx_fragment_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
276         {"mac_rx_undermin_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
278         {"mac_rx_jabber_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
280         {"mac_rx_fcs_err_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
282         {"mac_rx_send_app_good_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
284         {"mac_rx_send_app_bad_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
286 };
287
288 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
289         {
290                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
291                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
292                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
293                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
294                 .i_port_bitmap = 0x1,
295         },
296 };
297
298 static const u8 hclge_hash_key[] = {
299         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
300         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
301         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
302         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
303         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
304 };
305
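/* Read the MAC statistics with the fixed 21-descriptor command (special
 * opcode 0032); used when the firmware cannot report the number of stats
 * registers.
 */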
306 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
307 {
308 #define HCLGE_MAC_CMD_NUM 21
309
310         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
311         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
312         __le64 *desc_data;
313         int i, k, n;
314         int ret;
315
316         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
317         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
318         if (ret) {
319                 dev_err(&hdev->pdev->dev,
320                         "Get MAC pkt stats fail, status = %d.\n", ret);
321
322                 return ret;
323         }
324
325         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
326                 /* for special opcode 0032, only the first desc has the head */
327                 if (unlikely(i == 0)) {
328                         desc_data = (__le64 *)(&desc[i].data[0]);
329                         n = HCLGE_RD_FIRST_STATS_NUM;
330                 } else {
331                         desc_data = (__le64 *)(&desc[i]);
332                         n = HCLGE_RD_OTHER_STATS_NUM;
333                 }
334
335                 for (k = 0; k < n; k++) {
336                         *data += le64_to_cpu(*desc_data);
337                         data++;
338                         desc_data++;
339                 }
340         }
341
342         return 0;
343 }
344
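/* Read the MAC statistics with the variable-length command (special
 * opcode 0034); the descriptor count comes from hclge_mac_query_reg_num().
 */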
345 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
346 {
347         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
348         struct hclge_desc *desc;
349         __le64 *desc_data;
350         u16 i, k, n;
351         int ret;
352
353         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
            if (!desc)
                    return -ENOMEM;

354         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
355         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
356         if (ret) {
357                 kfree(desc);
358                 return ret;
359         }
360
361         for (i = 0; i < desc_num; i++) {
362                 /* for special opcode 0034, only the first desc has the head */
363                 if (i == 0) {
364                         desc_data = (__le64 *)(&desc[i].data[0]);
365                         n = HCLGE_RD_FIRST_STATS_NUM;
366                 } else {
367                         desc_data = (__le64 *)(&desc[i]);
368                         n = HCLGE_RD_OTHER_STATS_NUM;
369                 }
370
371                 for (k = 0; k < n; k++) {
372                         *data += le64_to_cpu(*desc_data);
373                         data++;
374                         desc_data++;
375                 }
376         }
377
378         kfree(desc);
379
380         return 0;
381 }
382
383 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
384 {
385         struct hclge_desc desc;
386         __le32 *desc_data;
387         u32 reg_num;
388         int ret;
389
390         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
391         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
392         if (ret)
393                 return ret;
394
395         desc_data = (__le32 *)(&desc.data[0]);
396         reg_num = le32_to_cpu(*desc_data);
397
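        /* one desc carries the head plus the first three stats registers;
         * every further desc carries four more, so round the remainder up
         */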
398         *desc_num = 1 + ((reg_num - 3) >> 2) +
399                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
400
401         return 0;
402 }
403
404 static int hclge_mac_update_stats(struct hclge_dev *hdev)
405 {
406         u32 desc_num;
407         int ret;
408
409         ret = hclge_mac_query_reg_num(hdev, &desc_num);
410
411         /* The firmware supports the new statistics acquisition method */
412         if (!ret)
413                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
414         else if (ret == -EOPNOTSUPP)
415                 ret = hclge_mac_update_stats_defective(hdev);
416         else
417                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
418
419         return ret;
420 }
421
422 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
423 {
424         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
425         struct hclge_vport *vport = hclge_get_vport(handle);
426         struct hclge_dev *hdev = vport->back;
427         struct hnae3_queue *queue;
428         struct hclge_desc desc[1];
429         struct hclge_tqp *tqp;
430         int ret, i;
431
432         for (i = 0; i < kinfo->num_tqps; i++) {
433                 queue = handle->kinfo.tqp[i];
434                 tqp = container_of(queue, struct hclge_tqp, q);
435                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
436                 hclge_cmd_setup_basic_desc(&desc[0],
437                                            HCLGE_OPC_QUERY_RX_STATUS,
438                                            true);
439
440                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
441                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
442                 if (ret) {
443                         dev_err(&hdev->pdev->dev,
444                                 "Query tqp stat fail, status = %d, queue = %d\n",
445                                 ret, i);
446                         return ret;
447                 }
448                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
449                         le32_to_cpu(desc[0].data[1]);
450         }
451
452         for (i = 0; i < kinfo->num_tqps; i++) {
453                 queue = handle->kinfo.tqp[i];
454                 tqp = container_of(queue, struct hclge_tqp, q);
455                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
456                 hclge_cmd_setup_basic_desc(&desc[0],
457                                            HCLGE_OPC_QUERY_TX_STATUS,
458                                            true);
459
460                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
461                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
462                 if (ret) {
463                         dev_err(&hdev->pdev->dev,
464                                 "Query tqp stat fail, status = %d, queue = %d\n",
465                                 ret, i);
466                         return ret;
467                 }
468                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
469                         le32_to_cpu(desc[0].data[1]);
470         }
471
472         return 0;
473 }
474
475 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
476 {
477         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
478         struct hclge_tqp *tqp;
479         u64 *buff = data;
480         int i;
481
482         for (i = 0; i < kinfo->num_tqps; i++) {
483                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
484                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
485         }
486
487         for (i = 0; i < kinfo->num_tqps; i++) {
488                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
489                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
490         }
491
492         return buff;
493 }
494
495 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
496 {
497         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
498
499         /* two stats per TQP: rcb tx and rx pktnum_rcd */
            return kinfo->num_tqps * 2;
500 }
501
502 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
503 {
504         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
505         u8 *buff = data;
506         int i = 0;
507
508         for (i = 0; i < kinfo->num_tqps; i++) {
509                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
510                         struct hclge_tqp, q);
511                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
512                          tqp->index);
513                 buff = buff + ETH_GSTRING_LEN;
514         }
515
516         for (i = 0; i < kinfo->num_tqps; i++) {
517                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
518                         struct hclge_tqp, q);
519                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
520                          tqp->index);
521                 buff = buff + ETH_GSTRING_LEN;
522         }
523
524         return buff;
525 }
526
527 static u64 *hclge_comm_get_stats(void *comm_stats,
528                                  const struct hclge_comm_stats_str strs[],
529                                  int size, u64 *data)
530 {
531         u64 *buf = data;
532         u32 i;
533
534         for (i = 0; i < size; i++)
535                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
536
537         return buf + size;
538 }
539
540 static u8 *hclge_comm_get_strings(u32 stringset,
541                                   const struct hclge_comm_stats_str strs[],
542                                   int size, u8 *data)
543 {
544         char *buff = (char *)data;
545         u32 i;
546
547         if (stringset != ETH_SS_STATS)
548                 return buff;
549
550         for (i = 0; i < size; i++) {
551                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
553                 buff = buff + ETH_GSTRING_LEN;
554         }
555
556         return (u8 *)buff;
557 }
558
559 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
560 {
561         struct hnae3_handle *handle;
562         int status;
563
564         handle = &hdev->vport[0].nic;
565         if (handle->client) {
566                 status = hclge_tqps_update_stats(handle);
567                 if (status) {
568                         dev_err(&hdev->pdev->dev,
569                                 "Update TQPS stats fail, status = %d.\n",
570                                 status);
571                 }
572         }
573
574         status = hclge_mac_update_stats(hdev);
575         if (status)
576                 dev_err(&hdev->pdev->dev,
577                         "Update MAC stats fail, status = %d.\n", status);
578 }
579
580 static void hclge_update_stats(struct hnae3_handle *handle,
581                                struct net_device_stats *net_stats)
582 {
583         struct hclge_vport *vport = hclge_get_vport(handle);
584         struct hclge_dev *hdev = vport->back;
585         int status;
586
587         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
588                 return;
589
590         status = hclge_mac_update_stats(hdev);
591         if (status)
592                 dev_err(&hdev->pdev->dev,
593                         "Update MAC stats fail, status = %d.\n",
594                         status);
595
596         status = hclge_tqps_update_stats(handle);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update TQPS stats fail, status = %d.\n",
600                         status);
601
602         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
603 }
604
605 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
606 {
607 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
608                 HNAE3_SUPPORT_PHY_LOOPBACK |\
609                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
610                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
611
612         struct hclge_vport *vport = hclge_get_vport(handle);
613         struct hclge_dev *hdev = vport->back;
614         int count = 0;
615
616         /* Loopback test support rules:
617          * mac: only supported in GE mode
618          * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
619          * phy: only supported when a PHY device exists on the board
620          */
621         if (stringset == ETH_SS_TEST) {
622                 /* clear loopback bit flags at first */
623                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
624                 if (hdev->pdev->revision >= 0x21 ||
625                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
626                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
627                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
628                         count += 1;
629                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
630                 }
631
632                 count += 2;
633                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
634                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
635         } else if (stringset == ETH_SS_STATS) {
636                 count = ARRAY_SIZE(g_mac_stats_string) +
637                         hclge_tqps_get_sset_count(handle, stringset);
638         }
639
640         return count;
641 }
642
643 static void hclge_get_strings(struct hnae3_handle *handle,
644                               u32 stringset,
645                               u8 *data)
646 {
647         u8 *p = data;
648         int size;
649
650         if (stringset == ETH_SS_STATS) {
651                 size = ARRAY_SIZE(g_mac_stats_string);
652                 p = hclge_comm_get_strings(stringset,
653                                            g_mac_stats_string,
654                                            size,
655                                            p);
656                 p = hclge_tqps_get_strings(handle, p);
657         } else if (stringset == ETH_SS_TEST) {
658                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
659                         memcpy(p,
660                                hns3_nic_test_strs[HNAE3_LOOP_APP],
661                                ETH_GSTRING_LEN);
662                         p += ETH_GSTRING_LEN;
663                 }
664                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682         }
683 }
684
685 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
686 {
687         struct hclge_vport *vport = hclge_get_vport(handle);
688         struct hclge_dev *hdev = vport->back;
689         u64 *p;
690
691         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
692                                  g_mac_stats_string,
693                                  ARRAY_SIZE(g_mac_stats_string),
694                                  data);
695         p = hclge_tqps_get_stats(handle, p);
696 }
697
698 static int hclge_parse_func_status(struct hclge_dev *hdev,
699                                    struct hclge_func_status_cmd *status)
700 {
701         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
702                 return -EINVAL;
703
704         /* Set the pf to main pf */
705         if (status->pf_state & HCLGE_PF_STATE_MAIN)
706                 hdev->flag |= HCLGE_FLAG_MAIN;
707         else
708                 hdev->flag &= ~HCLGE_FLAG_MAIN;
709
710         return 0;
711 }
712
713 static int hclge_query_function_status(struct hclge_dev *hdev)
714 {
715         struct hclge_func_status_cmd *req;
716         struct hclge_desc desc;
717         int timeout = 0;
718         int ret;
719
720         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
721         req = (struct hclge_func_status_cmd *)desc.data;
722
723         do {
724                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
725                 if (ret) {
726                         dev_err(&hdev->pdev->dev,
727                                 "query function status failed %d.\n",
728                                 ret);
729
730                         return ret;
731                 }
732
733                 /* Check pf reset is done */
734                 if (req->pf_state)
735                         break;
736                 usleep_range(1000, 2000);
737         } while (timeout++ < 5);
738
739         ret = hclge_parse_func_status(hdev, req);
740
741         return ret;
742 }
743
744 static int hclge_query_pf_resource(struct hclge_dev *hdev)
745 {
746         struct hclge_pf_res_cmd *req;
747         struct hclge_desc desc;
748         int ret;
749
750         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
751         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
752         if (ret) {
753                 dev_err(&hdev->pdev->dev,
754                         "query pf resource failed %d.\n", ret);
755                 return ret;
756         }
757
758         req = (struct hclge_pf_res_cmd *)desc.data;
759         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
760         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
761
762         if (req->tx_buf_size)
763                 hdev->tx_buf_size =
764                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
765         else
766                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
767
768         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
769
770         if (req->dv_buf_size)
771                 hdev->dv_buf_size =
772                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
773         else
774                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
775
776         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
777
778         if (hnae3_dev_roce_supported(hdev)) {
779                 hdev->roce_base_msix_offset =
780                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
781                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
782                 hdev->num_roce_msi =
783                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
784                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
785
786                 /* The PF has both NIC and RoCE vectors; the NIC
787                  * vectors are laid out before the RoCE vectors.
788                  */
789                 hdev->num_msi = hdev->num_roce_msi  +
790                                 hdev->roce_base_msix_offset;
791         } else {
792                 hdev->num_msi =
793                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
794                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
795         }
796
797         return 0;
798 }
799
800 static int hclge_parse_speed(int speed_cmd, int *speed)
801 {
802         switch (speed_cmd) {
803         case 6:
804                 *speed = HCLGE_MAC_SPEED_10M;
805                 break;
806         case 7:
807                 *speed = HCLGE_MAC_SPEED_100M;
808                 break;
809         case 0:
810                 *speed = HCLGE_MAC_SPEED_1G;
811                 break;
812         case 1:
813                 *speed = HCLGE_MAC_SPEED_10G;
814                 break;
815         case 2:
816                 *speed = HCLGE_MAC_SPEED_25G;
817                 break;
818         case 3:
819                 *speed = HCLGE_MAC_SPEED_40G;
820                 break;
821         case 4:
822                 *speed = HCLGE_MAC_SPEED_50G;
823                 break;
824         case 5:
825                 *speed = HCLGE_MAC_SPEED_100G;
826                 break;
827         default:
828                 return -EINVAL;
829         }
830
831         return 0;
832 }
833
834 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
835                                         u8 speed_ability)
836 {
837         unsigned long *supported = hdev->hw.mac.supported;
838
839         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
840                 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
841                         supported);
842
843         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
844                 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
845                         supported);
846
847         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
848                 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
849                         supported);
850
851         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
852                 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
853                         supported);
854
855         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
856                 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
857                         supported);
858
859         set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
860         set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
861 }
862
863 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
864 {
865         u8 media_type = hdev->hw.mac.media_type;
866
867         if (media_type != HNAE3_MEDIA_TYPE_FIBER)
868                 return;
869
870         hclge_parse_fiber_link_mode(hdev, speed_ability);
871 }
872
873 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
874 {
875         struct hclge_cfg_param_cmd *req;
876         u64 mac_addr_tmp_high;
877         u64 mac_addr_tmp;
878         int i;
879
880         req = (struct hclge_cfg_param_cmd *)desc[0].data;
881
882         /* get the configuration */
883         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
884                                               HCLGE_CFG_VMDQ_M,
885                                               HCLGE_CFG_VMDQ_S);
886         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
887                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
888         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
889                                             HCLGE_CFG_TQP_DESC_N_M,
890                                             HCLGE_CFG_TQP_DESC_N_S);
891
892         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
893                                         HCLGE_CFG_PHY_ADDR_M,
894                                         HCLGE_CFG_PHY_ADDR_S);
895         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
896                                           HCLGE_CFG_MEDIA_TP_M,
897                                           HCLGE_CFG_MEDIA_TP_S);
898         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
899                                           HCLGE_CFG_RX_BUF_LEN_M,
900                                           HCLGE_CFG_RX_BUF_LEN_S);
901         /* get mac_address */
902         mac_addr_tmp = __le32_to_cpu(req->param[2]);
903         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
904                                             HCLGE_CFG_MAC_ADDR_H_M,
905                                             HCLGE_CFG_MAC_ADDR_H_S);
906
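        /* combine the low 32 bits (param[2]) and the high 16 bits
         * (param[3]) of the MAC address
         */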
907         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
908
909         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
910                                              HCLGE_CFG_DEFAULT_SPEED_M,
911                                              HCLGE_CFG_DEFAULT_SPEED_S);
912         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
913                                             HCLGE_CFG_RSS_SIZE_M,
914                                             HCLGE_CFG_RSS_SIZE_S);
915
916         for (i = 0; i < ETH_ALEN; i++)
917                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
918
919         req = (struct hclge_cfg_param_cmd *)desc[1].data;
920         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
921
922         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
923                                              HCLGE_CFG_SPEED_ABILITY_M,
924                                              HCLGE_CFG_SPEED_ABILITY_S);
925         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
926                                          HCLGE_CFG_UMV_TBL_SPACE_M,
927                                          HCLGE_CFG_UMV_TBL_SPACE_S);
928         if (!cfg->umv_space)
929                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
930 }
931
932 /* hclge_get_cfg: query the static parameters from flash
933  * @hdev: pointer to struct hclge_dev
934  * @hcfg: the config structure to be filled
935  */
936 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
937 {
938         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
939         struct hclge_cfg_param_cmd *req;
940         int i, ret;
941
942         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
943                 u32 offset = 0;
944
945                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
946                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
947                                            true);
948                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
949                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
950                 /* Len is in units of 4 bytes when sent to hardware */
951                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
952                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
953                 req->offset = cpu_to_le32(offset);
954         }
955
956         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
957         if (ret) {
958                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
959                 return ret;
960         }
961
962         hclge_parse_cfg(hcfg, desc);
963
964         return 0;
965 }
966
967 static int hclge_get_cap(struct hclge_dev *hdev)
968 {
969         int ret;
970
971         ret = hclge_query_function_status(hdev);
972         if (ret) {
973                 dev_err(&hdev->pdev->dev,
974                         "query function status error %d.\n", ret);
975                 return ret;
976         }
977
978         /* get pf resource */
979         ret = hclge_query_pf_resource(hdev);
980         if (ret)
981                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
982
983         return ret;
984 }
985
986 static int hclge_configure(struct hclge_dev *hdev)
987 {
988         struct hclge_cfg cfg;
989         int ret, i;
990
991         ret = hclge_get_cfg(hdev, &cfg);
992         if (ret) {
993                 dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
994                 return ret;
995         }
996
997         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
998         hdev->base_tqp_pid = 0;
999         hdev->rss_size_max = cfg.rss_size_max;
1000         hdev->rx_buf_len = cfg.rx_buf_len;
1001         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1002         hdev->hw.mac.media_type = cfg.media_type;
1003         hdev->hw.mac.phy_addr = cfg.phy_addr;
1004         hdev->num_desc = cfg.tqp_desc_num;
1005         hdev->tm_info.num_pg = 1;
1006         hdev->tc_max = cfg.tc_num;
1007         hdev->tm_info.hw_pfc_map = 0;
1008         hdev->wanted_umv_size = cfg.umv_space;
1009
1010         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1011         if (ret) {
1012                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1013                 return ret;
1014         }
1015
1016         hclge_parse_link_mode(hdev, cfg.speed_ability);
1017
1018         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1019             (hdev->tc_max < 1)) {
1020                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1021                          hdev->tc_max);
1022                 hdev->tc_max = 1;
1023         }
1024
1025         /* Dev does not support DCB */
1026         if (!hnae3_dev_dcb_supported(hdev)) {
1027                 hdev->tc_max = 1;
1028                 hdev->pfc_max = 0;
1029         } else {
1030                 hdev->pfc_max = hdev->tc_max;
1031         }
1032
1033         hdev->tm_info.num_tc = 1;
1034
1035         /* Currently, non-contiguous TCs are not supported */
1036         for (i = 0; i < hdev->tm_info.num_tc; i++)
1037                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1038
1039         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1040
1041         return ret;
1042 }
1043
1044 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1045                             int tso_mss_max)
1046 {
1047         struct hclge_cfg_tso_status_cmd *req;
1048         struct hclge_desc desc;
1049         u16 tso_mss;
1050
1051         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1052
1053         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1054
1055         tso_mss = 0;
1056         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1057                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1058         req->tso_mss_min = cpu_to_le16(tso_mss);
1059
1060         tso_mss = 0;
1061         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1062                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1063         req->tso_mss_max = cpu_to_le16(tso_mss);
1064
1065         return hclge_cmd_send(&hdev->hw, &desc, 1);
1066 }
1067
1068 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1069 {
1070         struct hclge_cfg_gro_status_cmd *req;
1071         struct hclge_desc desc;
1072         int ret;
1073
1074         if (!hnae3_dev_gro_supported(hdev))
1075                 return 0;
1076
1077         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1078         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1079
1080         req->gro_en = cpu_to_le16(en ? 1 : 0);
1081
1082         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1083         if (ret)
1084                 dev_err(&hdev->pdev->dev,
1085                         "GRO hardware config cmd failed, ret = %d\n", ret);
1086
1087         return ret;
1088 }
1089
1090 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1091 {
1092         struct hclge_tqp *tqp;
1093         int i;
1094
1095         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1096                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1097         if (!hdev->htqp)
1098                 return -ENOMEM;
1099
1100         tqp = hdev->htqp;
1101
1102         for (i = 0; i < hdev->num_tqps; i++) {
1103                 tqp->dev = &hdev->pdev->dev;
1104                 tqp->index = i;
1105
1106                 tqp->q.ae_algo = &ae_algo;
1107                 tqp->q.buf_size = hdev->rx_buf_len;
1108                 tqp->q.desc_num = hdev->num_desc;
1109                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1110                         i * HCLGE_TQP_REG_SIZE;
1111
1112                 tqp++;
1113         }
1114
1115         return 0;
1116 }
1117
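/* Map physical TQP tqp_pid to function func_id as its virtual queue
 * tqp_vid, using the SET_TQP_MAP command.
 */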
1118 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1119                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1120 {
1121         struct hclge_tqp_map_cmd *req;
1122         struct hclge_desc desc;
1123         int ret;
1124
1125         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1126
1127         req = (struct hclge_tqp_map_cmd *)desc.data;
1128         req->tqp_id = cpu_to_le16(tqp_pid);
1129         req->tqp_vf = func_id;
1130         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1131                         1 << HCLGE_TQP_MAP_EN_B;
1132         req->tqp_vid = cpu_to_le16(tqp_vid);
1133
1134         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1135         if (ret)
1136                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1137
1138         return ret;
1139 }
1140
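/* Assign the first num_tqps unallocated hardware TQPs to the vport and
 * derive its rss_size from the per-TC queue count.
 */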
1141 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1142 {
1143         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1144         struct hclge_dev *hdev = vport->back;
1145         int i, alloced;
1146
1147         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1148              alloced < num_tqps; i++) {
1149                 if (!hdev->htqp[i].alloced) {
1150                         hdev->htqp[i].q.handle = &vport->nic;
1151                         hdev->htqp[i].q.tqp_index = alloced;
1152                         hdev->htqp[i].q.desc_num = kinfo->num_desc;
1153                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1154                         hdev->htqp[i].alloced = true;
1155                         alloced++;
1156                 }
1157         }
1158         vport->alloc_tqps = alloced;
1159         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1160                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1161
1162         return 0;
1163 }
1164
1165 static int hclge_knic_setup(struct hclge_vport *vport,
1166                             u16 num_tqps, u16 num_desc)
1167 {
1168         struct hnae3_handle *nic = &vport->nic;
1169         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1170         struct hclge_dev *hdev = vport->back;
1171         int ret;
1172
1173         kinfo->num_desc = num_desc;
1174         kinfo->rx_buf_len = hdev->rx_buf_len;
1175
1176         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1177                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1178         if (!kinfo->tqp)
1179                 return -ENOMEM;
1180
1181         ret = hclge_assign_tqp(vport, num_tqps);
1182         if (ret)
1183                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1184
1185         return ret;
1186 }
1187
1188 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1189                                   struct hclge_vport *vport)
1190 {
1191         struct hnae3_handle *nic = &vport->nic;
1192         struct hnae3_knic_private_info *kinfo;
1193         u16 i;
1194
1195         kinfo = &nic->kinfo;
1196         for (i = 0; i < vport->alloc_tqps; i++) {
1197                 struct hclge_tqp *q =
1198                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1199                 bool is_pf;
1200                 int ret;
1201
1202                 is_pf = !(vport->vport_id);
1203                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1204                                              i, is_pf);
1205                 if (ret)
1206                         return ret;
1207         }
1208
1209         return 0;
1210 }
1211
1212 static int hclge_map_tqp(struct hclge_dev *hdev)
1213 {
1214         struct hclge_vport *vport = hdev->vport;
1215         u16 i, num_vport;
1216
1217         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1218         for (i = 0; i < num_vport; i++) {
1219                 int ret;
1220
1221                 ret = hclge_map_tqp_to_vport(hdev, vport);
1222                 if (ret)
1223                         return ret;
1224
1225                 vport++;
1226         }
1227
1228         return 0;
1229 }
1230
1231 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1232 {
1233         /* this will be initialized later */
1234 }
1235
1236 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1237 {
1238         struct hnae3_handle *nic = &vport->nic;
1239         struct hclge_dev *hdev = vport->back;
1240         int ret;
1241
1242         nic->pdev = hdev->pdev;
1243         nic->ae_algo = &ae_algo;
1244         nic->numa_node_mask = hdev->numa_node_mask;
1245
1246         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1247                 ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1248                 if (ret) {
1249                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1250                                 ret);
1251                         return ret;
1252                 }
1253         } else {
1254                 hclge_unic_setup(vport, num_tqps);
1255         }
1256
1257         return 0;
1258 }
1259
1260 static int hclge_alloc_vport(struct hclge_dev *hdev)
1261 {
1262         struct pci_dev *pdev = hdev->pdev;
1263         struct hclge_vport *vport;
1264         u32 tqp_main_vport;
1265         u32 tqp_per_vport;
1266         int num_vport, i;
1267         int ret;
1268
1269         /* We need to alloc a vport for the main NIC of the PF */
1270         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1271
1272         if (hdev->num_tqps < num_vport) {
1273                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1274                         hdev->num_tqps, num_vport);
1275                 return -EINVAL;
1276         }
1277
1278         /* Alloc the same number of TQPs for every vport */
1279         tqp_per_vport = hdev->num_tqps / num_vport;
1280         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1281
1282         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1283                              GFP_KERNEL);
1284         if (!vport)
1285                 return -ENOMEM;
1286
1287         hdev->vport = vport;
1288         hdev->num_alloc_vport = num_vport;
1289
1290         if (IS_ENABLED(CONFIG_PCI_IOV))
1291                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1292
1293         for (i = 0; i < num_vport; i++) {
1294                 vport->back = hdev;
1295                 vport->vport_id = i;
1296                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1297
1298                 if (i == 0)
1299                         ret = hclge_vport_setup(vport, tqp_main_vport);
1300                 else
1301                         ret = hclge_vport_setup(vport, tqp_per_vport);
1302                 if (ret) {
1303                         dev_err(&pdev->dev,
1304                                 "vport setup failed for vport %d, %d\n",
1305                                 i, ret);
1306                         return ret;
1307                 }
1308
1309                 vport++;
1310         }
1311
1312         return 0;
1313 }
1314
1315 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1316                                     struct hclge_pkt_buf_alloc *buf_alloc)
1317 {
1318 /* TX buffer size is in units of 128 bytes */
1319 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1320 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1321         struct hclge_tx_buff_alloc_cmd *req;
1322         struct hclge_desc desc;
1323         int ret;
1324         u8 i;
1325
1326         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1327
1328         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1329         for (i = 0; i < HCLGE_TC_NUM; i++) {
1330                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1331
1332                 req->tx_pkt_buff[i] =
1333                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1334                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1335         }
1336
1337         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1338         if (ret)
1339                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1340                         ret);
1341
1342         return ret;
1343 }
1344
1345 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1346                                  struct hclge_pkt_buf_alloc *buf_alloc)
1347 {
1348         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1349
1350         if (ret)
1351                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1352
1353         return ret;
1354 }
1355
1356 static int hclge_get_tc_num(struct hclge_dev *hdev)
1357 {
1358         int i, cnt = 0;
1359
1360         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1361                 if (hdev->hw_tc_map & BIT(i))
1362                         cnt++;
1363         return cnt;
1364 }
1365
1366 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1367 {
1368         int i, cnt = 0;
1369
1370         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1371                 if (hdev->hw_tc_map & BIT(i) &&
1372                     hdev->tm_info.hw_pfc_map & BIT(i))
1373                         cnt++;
1374         return cnt;
1375 }
1376
1377 /* Get the number of PFC-enabled TCs which have a private buffer */
1378 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1379                                   struct hclge_pkt_buf_alloc *buf_alloc)
1380 {
1381         struct hclge_priv_buf *priv;
1382         int i, cnt = 0;
1383
1384         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1385                 priv = &buf_alloc->priv_buf[i];
1386                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1387                     priv->enable)
1388                         cnt++;
1389         }
1390
1391         return cnt;
1392 }
1393
1394 /* Get the number of PFC-disabled TCs which have a private buffer */
1395 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1396                                      struct hclge_pkt_buf_alloc *buf_alloc)
1397 {
1398         struct hclge_priv_buf *priv;
1399         int i, cnt = 0;
1400
1401         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1402                 priv = &buf_alloc->priv_buf[i];
1403                 if (hdev->hw_tc_map & BIT(i) &&
1404                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1405                     priv->enable)
1406                         cnt++;
1407         }
1408
1409         return cnt;
1410 }
1411
1412 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1413 {
1414         struct hclge_priv_buf *priv;
1415         u32 rx_priv = 0;
1416         int i;
1417
1418         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1419                 priv = &buf_alloc->priv_buf[i];
1420                 if (priv->enable)
1421                         rx_priv += priv->buf_size;
1422         }
1423         return rx_priv;
1424 }
1425
1426 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1427 {
1428         u32 i, total_tx_size = 0;
1429
1430         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1431                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1432
1433         return total_tx_size;
1434 }
1435
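/* Check whether the total rx buffer (rx_all) can cover the private buffers
 * plus the required shared buffer; if so, fill in the shared buffer size
 * and the per-TC thresholds.
 */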
1436 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1437                                 struct hclge_pkt_buf_alloc *buf_alloc,
1438                                 u32 rx_all)
1439 {
1440         u32 shared_buf_min, shared_buf_tc, shared_std;
1441         int tc_num, pfc_enable_num;
1442         u32 shared_buf, aligned_mps;
1443         u32 rx_priv;
1444         int i;
1445
1446         tc_num = hclge_get_tc_num(hdev);
1447         pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1448         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1449
1450         if (hnae3_dev_dcb_supported(hdev))
1451                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1452         else
1453                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1454                                         + hdev->dv_buf_size;
1455
1456         shared_buf_tc = pfc_enable_num * aligned_mps +
1457                         (tc_num - pfc_enable_num) * aligned_mps / 2 +
1458                         aligned_mps;
1459         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1460                              HCLGE_BUF_SIZE_UNIT);
1461
1462         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1463         if (rx_all < rx_priv + shared_std)
1464                 return false;
1465
1466         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1467         buf_alloc->s_buf.buf_size = shared_buf;
1468         if (hnae3_dev_dcb_supported(hdev)) {
1469                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1470                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1471                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1472         } else {
1473                 buf_alloc->s_buf.self.high = aligned_mps +
1474                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1475                 buf_alloc->s_buf.self.low =
1476                         roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1477         }
1478
1479         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1480                 if ((hdev->hw_tc_map & BIT(i)) &&
1481                     (hdev->tm_info.hw_pfc_map & BIT(i))) {
1482                         buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1483                         buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
1484                 } else {
1485                         buf_alloc->s_buf.tc_thrd[i].low = 0;
1486                         buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
1487                 }
1488         }
1489
1490         return true;
1491 }
1492
1493 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1494                                 struct hclge_pkt_buf_alloc *buf_alloc)
1495 {
1496         u32 i, total_size;
1497
1498         total_size = hdev->pkt_buf_size;
1499
1500         /* alloc tx buffer for all enabled tc */
1501         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1502                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1503
1504                 if (total_size < hdev->tx_buf_size)
1505                         return -ENOMEM;
1506
1507                 if (hdev->hw_tc_map & BIT(i))
1508                         priv->tx_buf_size = hdev->tx_buf_size;
1509                 else
1510                         priv->tx_buf_size = 0;
1511
1512                 total_size -= priv->tx_buf_size;
1513         }
1514
1515         return 0;
1516 }
1517
1518 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1519  * @hdev: pointer to struct hclge_dev
1520  * @buf_alloc: pointer to buffer calculation data
1521  * @return: 0: calculation successful, negative: fail
1522  */
1523 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1524                                 struct hclge_pkt_buf_alloc *buf_alloc)
1525 {
1526         u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1527         int no_pfc_priv_num, pfc_priv_num;
1528         struct hclge_priv_buf *priv;
1529         int i;
1530
1531         aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1532         rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1533
1534         /* When DCB is not supported, rx private
1535          * buffer is not allocated.
1536          */
1537         if (!hnae3_dev_dcb_supported(hdev)) {
1538                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1539                         return -ENOMEM;
1540
1541                 return 0;
1542         }
1543
1544         /* step 1, try to alloc private buffer for all enabled tc */
1545         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1546                 priv = &buf_alloc->priv_buf[i];
1547                 if (hdev->hw_tc_map & BIT(i)) {
1548                         priv->enable = 1;
1549                         if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1550                                 priv->wl.low = aligned_mps;
1551                                 priv->wl.high =
1552                                         roundup(priv->wl.low + aligned_mps,
1553                                                 HCLGE_BUF_SIZE_UNIT);
1554                                 priv->buf_size = priv->wl.high +
1555                                         hdev->dv_buf_size;
1556                         } else {
1557                                 priv->wl.low = 0;
1558                                 priv->wl.high = 2 * aligned_mps;
1559                                 priv->buf_size = priv->wl.high +
1560                                                 hdev->dv_buf_size;
1561                         }
1562                 } else {
1563                         priv->enable = 0;
1564                         priv->wl.low = 0;
1565                         priv->wl.high = 0;
1566                         priv->buf_size = 0;
1567                 }
1568         }
1569
1570         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1571                 return 0;
1572
1573         /* step 2, try to decrease the buffer size of
1574          * no pfc TC's private buffer
1575          */
1576         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1577                 priv = &buf_alloc->priv_buf[i];
1578
1579                 priv->enable = 0;
1580                 priv->wl.low = 0;
1581                 priv->wl.high = 0;
1582                 priv->buf_size = 0;
1583
1584                 if (!(hdev->hw_tc_map & BIT(i)))
1585                         continue;
1586
1587                 priv->enable = 1;
1588
1589                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1590                         priv->wl.low = 256;
1591                         priv->wl.high = priv->wl.low + aligned_mps;
1592                         priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1593                 } else {
1594                         priv->wl.low = 0;
1595                         priv->wl.high = aligned_mps;
1596                         priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1597                 }
1598         }
1599
1600         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1601                 return 0;
1602
1603         /* step 3, try to reduce the number of pfc disabled TCs,
1604          * which have private buffer
1605          */
1606         /* get the number of non-PFC TCs which have a private buffer */
1607         no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1608
1609         /* clear starting from the last TC */
1610         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1611                 priv = &buf_alloc->priv_buf[i];
1612
1613                 if (hdev->hw_tc_map & BIT(i) &&
1614                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1615                         /* Clear the no pfc TC private buffer */
1616                         priv->wl.low = 0;
1617                         priv->wl.high = 0;
1618                         priv->buf_size = 0;
1619                         priv->enable = 0;
1620                         no_pfc_priv_num--;
1621                 }
1622
1623                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1624                     no_pfc_priv_num == 0)
1625                         break;
1626         }
1627
1628         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1629                 return 0;
1630
1631         /* step 4, try to reduce the number of pfc enabled TCs
1632          * which have private buffer.
1633          */
1634         pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1635
1636         /* clear starting from the last TC */
1637         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1638                 priv = &buf_alloc->priv_buf[i];
1639
1640                 if (hdev->hw_tc_map & BIT(i) &&
1641                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1642                         /* Reduce the number of pfc TC with private buffer */
1643                         priv->wl.low = 0;
1644                         priv->enable = 0;
1645                         priv->wl.high = 0;
1646                         priv->buf_size = 0;
1647                         pfc_priv_num--;
1648                 }
1649
1650                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1651                     pfc_priv_num == 0)
1652                         break;
1653         }
1654         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1655                 return 0;
1656
1657         return -ENOMEM;
1658 }
1659
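/* Write the calculated per-TC rx private buffer sizes and the shared
 * buffer size to hardware.
 */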
1660 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1661                                    struct hclge_pkt_buf_alloc *buf_alloc)
1662 {
1663         struct hclge_rx_priv_buff_cmd *req;
1664         struct hclge_desc desc;
1665         int ret;
1666         int i;
1667
1668         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1669         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1670
1671         /* Alloc private buffer TCs */
1672         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1673                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1674
1675                 req->buf_num[i] =
1676                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1677                 req->buf_num[i] |=
1678                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1679         }
1680
1681         req->shared_buf =
1682                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1683                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1684
1685         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1686         if (ret)
1687                 dev_err(&hdev->pdev->dev,
1688                         "rx private buffer alloc cmd failed %d\n", ret);
1689
1690         return ret;
1691 }
1692
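/* Configure the high/low waterlines of each TC's rx private buffer */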
1693 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1694                                    struct hclge_pkt_buf_alloc *buf_alloc)
1695 {
1696         struct hclge_rx_priv_wl_buf *req;
1697         struct hclge_priv_buf *priv;
1698         struct hclge_desc desc[2];
1699         int i, j;
1700         int ret;
1701
1702         for (i = 0; i < 2; i++) {
1703                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1704                                            false);
1705                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1706
1707                 /* The first descriptor sets the NEXT bit to 1 */
1708                 if (i == 0)
1709                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1710                 else
1711                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1712
1713                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1714                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1715
1716                         priv = &buf_alloc->priv_buf[idx];
1717                         req->tc_wl[j].high =
1718                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1719                         req->tc_wl[j].high |=
1720                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1721                         req->tc_wl[j].low =
1722                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1723                         req->tc_wl[j].low |=
1724                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1725                 }
1726         }
1727
1728         /* Send 2 descriptors at one time */
1729         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1730         if (ret)
1731                 dev_err(&hdev->pdev->dev,
1732                         "rx private waterline config cmd failed %d\n",
1733                         ret);
1734         return ret;
1735 }
1736
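/* Configure the per-TC high/low thresholds of the shared rx buffer */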
1737 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1738                                     struct hclge_pkt_buf_alloc *buf_alloc)
1739 {
1740         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1741         struct hclge_rx_com_thrd *req;
1742         struct hclge_desc desc[2];
1743         struct hclge_tc_thrd *tc;
1744         int i, j;
1745         int ret;
1746
1747         for (i = 0; i < 2; i++) {
1748                 hclge_cmd_setup_basic_desc(&desc[i],
1749                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1750                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1751
1752                 /* The first descriptor sets the NEXT bit to 1 */
1753                 if (i == 0)
1754                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1755                 else
1756                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1757
1758                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1759                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1760
1761                         req->com_thrd[j].high =
1762                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1763                         req->com_thrd[j].high |=
1764                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1765                         req->com_thrd[j].low =
1766                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1767                         req->com_thrd[j].low |=
1768                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1769                 }
1770         }
1771
1772         /* Send 2 descriptors at one time */
1773         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1774         if (ret)
1775                 dev_err(&hdev->pdev->dev,
1776                         "common threshold config cmd failed %d\n", ret);
1777         return ret;
1778 }
1779
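/* Configure the high/low waterlines of the shared rx buffer itself */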
1780 static int hclge_common_wl_config(struct hclge_dev *hdev,
1781                                   struct hclge_pkt_buf_alloc *buf_alloc)
1782 {
1783         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1784         struct hclge_rx_com_wl *req;
1785         struct hclge_desc desc;
1786         int ret;
1787
1788         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1789
1790         req = (struct hclge_rx_com_wl *)desc.data;
1791         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1792         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1793
1794         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1795         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1796
1797         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1798         if (ret)
1799                 dev_err(&hdev->pdev->dev,
1800                         "common waterline config cmd failed %d\n", ret);
1801
1802         return ret;
1803 }
1804
1805 int hclge_buffer_alloc(struct hclge_dev *hdev)
1806 {
1807         struct hclge_pkt_buf_alloc *pkt_buf;
1808         int ret;
1809
1810         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1811         if (!pkt_buf)
1812                 return -ENOMEM;
1813
1814         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1815         if (ret) {
1816                 dev_err(&hdev->pdev->dev,
1817                         "could not calc tx buffer size for all TCs %d\n", ret);
1818                 goto out;
1819         }
1820
1821         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1822         if (ret) {
1823                 dev_err(&hdev->pdev->dev,
1824                         "could not alloc tx buffers %d\n", ret);
1825                 goto out;
1826         }
1827
1828         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1829         if (ret) {
1830                 dev_err(&hdev->pdev->dev,
1831                         "could not calc rx priv buffer size for all TCs %d\n",
1832                         ret);
1833                 goto out;
1834         }
1835
1836         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1837         if (ret) {
1838                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1839                         ret);
1840                 goto out;
1841         }
1842
1843         if (hnae3_dev_dcb_supported(hdev)) {
1844                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1845                 if (ret) {
1846                         dev_err(&hdev->pdev->dev,
1847                                 "could not configure rx private waterline %d\n",
1848                                 ret);
1849                         goto out;
1850                 }
1851
1852                 ret = hclge_common_thrd_config(hdev, pkt_buf);
1853                 if (ret) {
1854                         dev_err(&hdev->pdev->dev,
1855                                 "could not configure common threshold %d\n",
1856                                 ret);
1857                         goto out;
1858                 }
1859         }
1860
1861         ret = hclge_common_wl_config(hdev, pkt_buf);
1862         if (ret)
1863                 dev_err(&hdev->pdev->dev,
1864                         "could not configure common waterline %d\n", ret);
1865
1866 out:
1867         kfree(pkt_buf);
1868         return ret;
1869 }
1870
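/* Copy vector and netdev information from the NIC handle and the PF
 * into the RoCE handle of the same vport.
 */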
1871 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1872 {
1873         struct hnae3_handle *roce = &vport->roce;
1874         struct hnae3_handle *nic = &vport->nic;
1875
1876         roce->rinfo.num_vectors = vport->back->num_roce_msi;
1877
1878         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1879             vport->back->num_msi_left == 0)
1880                 return -EINVAL;
1881
1882         roce->rinfo.base_vector = vport->back->roce_base_vector;
1883
1884         roce->rinfo.netdev = nic->kinfo.netdev;
1885         roce->rinfo.roce_io_base = vport->back->hw.io_base;
1886
1887         roce->pdev = nic->pdev;
1888         roce->ae_algo = nic->ae_algo;
1889         roce->numa_node_mask = nic->numa_node_mask;
1890
1891         return 0;
1892 }
1893
1894 static int hclge_init_msi(struct hclge_dev *hdev)
1895 {
1896         struct pci_dev *pdev = hdev->pdev;
1897         int vectors;
1898         int i;
1899
1900         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1901                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
1902         if (vectors < 0) {
1903                 dev_err(&pdev->dev,
1904                         "failed(%d) to allocate MSI/MSI-X vectors\n",
1905                         vectors);
1906                 return vectors;
1907         }
1908         if (vectors < hdev->num_msi)
1909                 dev_warn(&hdev->pdev->dev,
1910                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1911                          hdev->num_msi, vectors);
1912
1913         hdev->num_msi = vectors;
1914         hdev->num_msi_left = vectors;
1915         hdev->base_msi_vector = pdev->irq;
1916         hdev->roce_base_vector = hdev->base_msi_vector +
1917                                 hdev->roce_base_msix_offset;
1918
1919         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1920                                            sizeof(u16), GFP_KERNEL);
1921         if (!hdev->vector_status) {
1922                 pci_free_irq_vectors(pdev);
1923                 return -ENOMEM;
1924         }
1925
1926         for (i = 0; i < hdev->num_msi; i++)
1927                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1928
1929         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1930                                         sizeof(int), GFP_KERNEL);
1931         if (!hdev->vector_irq) {
1932                 pci_free_irq_vectors(pdev);
1933                 return -ENOMEM;
1934         }
1935
1936         return 0;
1937 }
1938
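/* Half duplex is only valid for 10M and 100M; force full duplex otherwise */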
1939 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1940 {
1941
1942         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1943                 duplex = HCLGE_MAC_FULL;
1944
1945         return duplex;
1946 }
1947
1948 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1949                                       u8 duplex)
1950 {
1951         struct hclge_config_mac_speed_dup_cmd *req;
1952         struct hclge_desc desc;
1953         int ret;
1954
1955         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1956
1957         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1958
1959         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1960
1961         switch (speed) {
1962         case HCLGE_MAC_SPEED_10M:
1963                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1964                                 HCLGE_CFG_SPEED_S, 6);
1965                 break;
1966         case HCLGE_MAC_SPEED_100M:
1967                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1968                                 HCLGE_CFG_SPEED_S, 7);
1969                 break;
1970         case HCLGE_MAC_SPEED_1G:
1971                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1972                                 HCLGE_CFG_SPEED_S, 0);
1973                 break;
1974         case HCLGE_MAC_SPEED_10G:
1975                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1976                                 HCLGE_CFG_SPEED_S, 1);
1977                 break;
1978         case HCLGE_MAC_SPEED_25G:
1979                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1980                                 HCLGE_CFG_SPEED_S, 2);
1981                 break;
1982         case HCLGE_MAC_SPEED_40G:
1983                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1984                                 HCLGE_CFG_SPEED_S, 3);
1985                 break;
1986         case HCLGE_MAC_SPEED_50G:
1987                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1988                                 HCLGE_CFG_SPEED_S, 4);
1989                 break;
1990         case HCLGE_MAC_SPEED_100G:
1991                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1992                                 HCLGE_CFG_SPEED_S, 5);
1993                 break;
1994         default:
1995                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1996                 return -EINVAL;
1997         }
1998
1999         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2000                       1);
2001
2002         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2003         if (ret) {
2004                 dev_err(&hdev->pdev->dev,
2005                         "mac speed/duplex config cmd failed %d.\n", ret);
2006                 return ret;
2007         }
2008
2009         return 0;
2010 }
2011
2012 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2013 {
2014         int ret;
2015
2016         duplex = hclge_check_speed_dup(duplex, speed);
2017         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2018                 return 0;
2019
2020         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2021         if (ret)
2022                 return ret;
2023
2024         hdev->hw.mac.speed = speed;
2025         hdev->hw.mac.duplex = duplex;
2026
2027         return 0;
2028 }
2029
2030 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2031                                      u8 duplex)
2032 {
2033         struct hclge_vport *vport = hclge_get_vport(handle);
2034         struct hclge_dev *hdev = vport->back;
2035
2036         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2037 }
2038
2039 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2040 {
2041         struct hclge_config_auto_neg_cmd *req;
2042         struct hclge_desc desc;
2043         u32 flag = 0;
2044         int ret;
2045
2046         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2047
2048         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2049         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2050         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2051
2052         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2053         if (ret)
2054                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2055                         ret);
2056
2057         return ret;
2058 }
2059
2060 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2061 {
2062         struct hclge_vport *vport = hclge_get_vport(handle);
2063         struct hclge_dev *hdev = vport->back;
2064
2065         return hclge_set_autoneg_en(hdev, enable);
2066 }
2067
2068 static int hclge_get_autoneg(struct hnae3_handle *handle)
2069 {
2070         struct hclge_vport *vport = hclge_get_vport(handle);
2071         struct hclge_dev *hdev = vport->back;
2072         struct phy_device *phydev = hdev->hw.mac.phydev;
2073
2074         if (phydev)
2075                 return phydev->autoneg;
2076
2077         return hdev->hw.mac.autoneg;
2078 }
2079
2080 static int hclge_mac_init(struct hclge_dev *hdev)
2081 {
2082         struct hclge_mac *mac = &hdev->hw.mac;
2083         int ret;
2084
2085         hdev->support_sfp_query = true;
2086         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2087         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2088                                          hdev->hw.mac.duplex);
2089         if (ret) {
2090                 dev_err(&hdev->pdev->dev,
2091                         "Config mac speed dup fail ret=%d\n", ret);
2092                 return ret;
2093         }
2094
2095         mac->link = 0;
2096
2097         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2098         if (ret) {
2099                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2100                 return ret;
2101         }
2102
2103         ret = hclge_buffer_alloc(hdev);
2104         if (ret)
2105                 dev_err(&hdev->pdev->dev,
2106                         "allocate buffer fail, ret=%d\n", ret);
2107
2108         return ret;
2109 }
2110
2111 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2112 {
2113         if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2114                 schedule_work(&hdev->mbx_service_task);
2115 }
2116
2117 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2118 {
2119         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2120                 schedule_work(&hdev->rst_service_task);
2121 }
2122
2123 static void hclge_task_schedule(struct hclge_dev *hdev)
2124 {
2125         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2126             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2127             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2128                 (void)schedule_work(&hdev->service_task);
2129 }
2130
2131 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2132 {
2133         struct hclge_link_status_cmd *req;
2134         struct hclge_desc desc;
2135         int link_status;
2136         int ret;
2137
2138         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2139         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2140         if (ret) {
2141                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2142                         ret);
2143                 return ret;
2144         }
2145
2146         req = (struct hclge_link_status_cmd *)desc.data;
2147         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2148
2149         return !!link_status;
2150 }
2151
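/* Report link up only when the MAC link is up and, if a PHY is
 * present, the PHY link is up as well.
 */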
2152 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2153 {
2154         int mac_state;
2155         int link_stat;
2156
2157         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2158                 return 0;
2159
2160         mac_state = hclge_get_mac_link_status(hdev);
2161
2162         if (hdev->hw.mac.phydev) {
2163                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2164                         link_stat = mac_state &
2165                                 hdev->hw.mac.phydev->link;
2166                 else
2167                         link_stat = 0;
2168
2169         } else {
2170                 link_stat = mac_state;
2171         }
2172
2173         return !!link_stat;
2174 }
2175
2176 static void hclge_update_link_status(struct hclge_dev *hdev)
2177 {
2178         struct hnae3_client *rclient = hdev->roce_client;
2179         struct hnae3_client *client = hdev->nic_client;
2180         struct hnae3_handle *rhandle;
2181         struct hnae3_handle *handle;
2182         int state;
2183         int i;
2184
2185         if (!client)
2186                 return;
2187         state = hclge_get_mac_phy_link(hdev);
2188         if (state != hdev->hw.mac.link) {
2189                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2190                         handle = &hdev->vport[i].nic;
2191                         client->ops->link_status_change(handle, state);
2192                         rhandle = &hdev->vport[i].roce;
2193                         if (rclient && rclient->ops->link_status_change)
2194                                 rclient->ops->link_status_change(rhandle,
2195                                                                  state);
2196                 }
2197                 hdev->hw.mac.link = state;
2198         }
2199 }
2200
2201 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2202 {
2203         struct hclge_sfp_speed_cmd *resp = NULL;
2204         struct hclge_desc desc;
2205         int ret;
2206
2207         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2208         resp = (struct hclge_sfp_speed_cmd *)desc.data;
2209         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2210         if (ret == -EOPNOTSUPP) {
2211                 dev_warn(&hdev->pdev->dev,
2212                          "IMP does not support getting SFP speed %d\n", ret);
2213                 return ret;
2214         } else if (ret) {
2215                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2216                 return ret;
2217         }
2218
2219         *speed = resp->sfp_speed;
2220
2221         return 0;
2222 }
2223
2224 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2225 {
2226         struct hclge_mac mac = hdev->hw.mac;
2227         int speed;
2228         int ret;
2229
2230         /* get the speed from SFP cmd when phy
2231          * doesn't exist.
2232          */
2233         if (mac.phydev)
2234                 return 0;
2235
2236         /* if IMP does not support get SFP/qSFP speed, return directly */
2237         if (!hdev->support_sfp_query)
2238                 return 0;
2239
2240         ret = hclge_get_sfp_speed(hdev, &speed);
2241         if (ret == -EOPNOTSUPP) {
2242                 hdev->support_sfp_query = false;
2243                 return ret;
2244         } else if (ret) {
2245                 return ret;
2246         }
2247
2248         if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2249                 return 0; /* do nothing if no SFP */
2250
2251         /* must config full duplex for SFP */
2252         return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2253 }
2254
2255 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2256 {
2257         struct hclge_vport *vport = hclge_get_vport(handle);
2258         struct hclge_dev *hdev = vport->back;
2259
2260         return hclge_update_speed_duplex(hdev);
2261 }
2262
2263 static int hclge_get_status(struct hnae3_handle *handle)
2264 {
2265         struct hclge_vport *vport = hclge_get_vport(handle);
2266         struct hclge_dev *hdev = vport->back;
2267
2268         hclge_update_link_status(hdev);
2269
2270         return hdev->hw.mac.link;
2271 }
2272
2273 static void hclge_service_timer(struct timer_list *t)
2274 {
2275         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2276
2277         mod_timer(&hdev->service_timer, jiffies + HZ);
2278         hdev->hw_stats.stats_timer++;
2279         hclge_task_schedule(hdev);
2280 }
2281
2282 static void hclge_service_complete(struct hclge_dev *hdev)
2283 {
2284         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2285
2286         /* Flush memory before next watchdog */
2287         smp_mb__before_atomic();
2288         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2289 }
2290
2291 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2292 {
2293         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2294
2295         /* fetch the events from their corresponding regs */
2296         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2297         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2298         msix_src_reg = hclge_read_dev(&hdev->hw,
2299                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2300
2301         /* Assumption: If by any chance reset and mailbox events are reported
2302          * together then we will only process reset event in this go and will
2303          * defer the processing of the mailbox events. Since we would not have
2304          * cleared the RX CMDQ event this time, we would receive another
2305          * interrupt from H/W just for the mailbox.
2306          */
2307
2308         /* check for vector0 reset event sources */
2309         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2310                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2311                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2312                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2313                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2314                 return HCLGE_VECTOR0_EVENT_RST;
2315         }
2316
2317         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2318                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2319                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2320                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2321                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2322                 return HCLGE_VECTOR0_EVENT_RST;
2323         }
2324
2325         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2326                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2327                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2328                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2329                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2330                 return HCLGE_VECTOR0_EVENT_RST;
2331         }
2332
2333         /* check for vector0 msix event source */
2334         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2335                 return HCLGE_VECTOR0_EVENT_ERR;
2336
2337         /* check for vector0 mailbox(=CMDQ RX) event source */
2338         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2339                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2340                 *clearval = cmdq_src_reg;
2341                 return HCLGE_VECTOR0_EVENT_MBX;
2342         }
2343
2344         return HCLGE_VECTOR0_EVENT_OTHER;
2345 }
2346
2347 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2348                                     u32 regclr)
2349 {
2350         switch (event_type) {
2351         case HCLGE_VECTOR0_EVENT_RST:
2352                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2353                 break;
2354         case HCLGE_VECTOR0_EVENT_MBX:
2355                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2356                 break;
2357         default:
2358                 break;
2359         }
2360 }
2361
2362 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2363 {
2364         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2365                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2366                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2367                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2368         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2369 }
2370
2371 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2372 {
2373         writel(enable ? 1 : 0, vector->addr);
2374 }
2375
2376 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2377 {
2378         struct hclge_dev *hdev = data;
2379         u32 event_cause;
2380         u32 clearval;
2381
2382         hclge_enable_vector(&hdev->misc_vector, false);
2383         event_cause = hclge_check_event_cause(hdev, &clearval);
2384
2385         /* vector 0 interrupt is shared with reset and mailbox source events. */
2386         switch (event_cause) {
2387         case HCLGE_VECTOR0_EVENT_ERR:
2388                 /* we do not know what type of reset is required now. This could
2389                  * only be decided after we fetch the type of errors which
2390                  * caused this event. Therefore, we will do below for now:
2391                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2392                  *    have a deferred type of reset to be used.
2393                  * 2. Schedule the reset service task.
2394                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2395                  *    will fetch the correct type of reset. This would be done
2396                  *    by first decoding the types of errors.
2397                  */
2398                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2399                 /* fall through */
2400         case HCLGE_VECTOR0_EVENT_RST:
2401                 hclge_reset_task_schedule(hdev);
2402                 break;
2403         case HCLGE_VECTOR0_EVENT_MBX:
2404                 /* If we are here then,
2405                  * 1. Either we are not handling any mbx task and we are not
2406                  *    scheduled as well
2407                  *                        OR
2408                  * 2. We could be handling a mbx task but nothing more is
2409                  *    scheduled.
2410                  * In both cases, we should schedule mbx task as there are more
2411                  * mbx messages reported by this interrupt.
2412                  */
2413                 hclge_mbx_task_schedule(hdev);
2414                 break;
2415         default:
2416                 dev_warn(&hdev->pdev->dev,
2417                          "received unknown or unhandled event of vector0\n");
2418                 break;
2419         }
2420
2421         /* clear the source of interrupt if it is not caused by reset */
2422         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2423                 hclge_clear_event_cause(hdev, event_cause, clearval);
2424                 hclge_enable_vector(&hdev->misc_vector, true);
2425         }
2426
2427         return IRQ_HANDLED;
2428 }
2429
2430 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2431 {
2432         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2433                 dev_warn(&hdev->pdev->dev,
2434                          "vector(vector_id %d) has been freed.\n", vector_id);
2435                 return;
2436         }
2437
2438         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2439         hdev->num_msi_left += 1;
2440         hdev->num_msi_used -= 1;
2441 }
2442
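/* MSI-X vector 0 is reserved for the misc interrupt, which reports
 * reset, mailbox and hardware error events.
 */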
2443 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2444 {
2445         struct hclge_misc_vector *vector = &hdev->misc_vector;
2446
2447         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2448
2449         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2450         hdev->vector_status[0] = 0;
2451
2452         hdev->num_msi_left -= 1;
2453         hdev->num_msi_used += 1;
2454 }
2455
2456 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2457 {
2458         int ret;
2459
2460         hclge_get_misc_vector(hdev);
2461
2462         /* this would be explicitly freed in the end */
2463         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2464                           0, "hclge_misc", hdev);
2465         if (ret) {
2466                 hclge_free_vector(hdev, 0);
2467                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2468                         hdev->misc_vector.vector_irq);
2469         }
2470
2471         return ret;
2472 }
2473
2474 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2475 {
2476         free_irq(hdev->misc_vector.vector_irq, hdev);
2477         hclge_free_vector(hdev, 0);
2478 }
2479
2480 int hclge_notify_client(struct hclge_dev *hdev,
2481                         enum hnae3_reset_notify_type type)
2482 {
2483         struct hnae3_client *client = hdev->nic_client;
2484         u16 i;
2485
2486         if (!client->ops->reset_notify)
2487                 return -EOPNOTSUPP;
2488
2489         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2490                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2491                 int ret;
2492
2493                 ret = client->ops->reset_notify(handle, type);
2494                 if (ret) {
2495                         dev_err(&hdev->pdev->dev,
2496                                 "notify nic client failed %d(%d)\n", type, ret);
2497                         return ret;
2498                 }
2499         }
2500
2501         return 0;
2502 }
2503
2504 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2505                                     enum hnae3_reset_notify_type type)
2506 {
2507         struct hnae3_client *client = hdev->roce_client;
2508         int ret = 0;
2509         u16 i;
2510
2511         if (!client)
2512                 return 0;
2513
2514         if (!client->ops->reset_notify)
2515                 return -EOPNOTSUPP;
2516
2517         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2518                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2519
2520                 ret = client->ops->reset_notify(handle, type);
2521                 if (ret) {
2522                         dev_err(&hdev->pdev->dev,
2523                                 "notify roce client failed %d(%d)\n",
2524                                 type, ret);
2525                         return ret;
2526                 }
2527         }
2528
2529         return ret;
2530 }
2531
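/* Poll the hardware until the requested reset has completed, or give up
 * after HCLGE_RESET_WAIT_CNT polls.
 */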
2532 static int hclge_reset_wait(struct hclge_dev *hdev)
2533 {
2534 #define HCLGE_RESET_WAIT_MS     100
2535 #define HCLGE_RESET_WAIT_CNT    200
2536         u32 val, reg, reg_bit;
2537         u32 cnt = 0;
2538
2539         switch (hdev->reset_type) {
2540         case HNAE3_IMP_RESET:
2541                 reg = HCLGE_GLOBAL_RESET_REG;
2542                 reg_bit = HCLGE_IMP_RESET_BIT;
2543                 break;
2544         case HNAE3_GLOBAL_RESET:
2545                 reg = HCLGE_GLOBAL_RESET_REG;
2546                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2547                 break;
2548         case HNAE3_CORE_RESET:
2549                 reg = HCLGE_GLOBAL_RESET_REG;
2550                 reg_bit = HCLGE_CORE_RESET_BIT;
2551                 break;
2552         case HNAE3_FUNC_RESET:
2553                 reg = HCLGE_FUN_RST_ING;
2554                 reg_bit = HCLGE_FUN_RST_ING_B;
2555                 break;
2556         case HNAE3_FLR_RESET:
2557                 break;
2558         default:
2559                 dev_err(&hdev->pdev->dev,
2560                         "Wait for unsupported reset type: %d\n",
2561                         hdev->reset_type);
2562                 return -EINVAL;
2563         }
2564
2565         if (hdev->reset_type == HNAE3_FLR_RESET) {
2566                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2567                        cnt++ < HCLGE_RESET_WAIT_CNT)
2568                         msleep(HCLGE_RESET_WAIT_MS);
2569
2570                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2571                         dev_err(&hdev->pdev->dev,
2572                                 "flr wait timeout: %d\n", cnt);
2573                         return -EBUSY;
2574                 }
2575
2576                 return 0;
2577         }
2578
2579         val = hclge_read_dev(&hdev->hw, reg);
2580         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2581                 msleep(HCLGE_RESET_WAIT_MS);
2582                 val = hclge_read_dev(&hdev->hw, reg);
2583                 cnt++;
2584         }
2585
2586         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2587                 dev_warn(&hdev->pdev->dev,
2588                          "Wait for reset timeout: %d\n", hdev->reset_type);
2589                 return -EBUSY;
2590         }
2591
2592         return 0;
2593 }
2594
2595 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2596 {
2597         struct hclge_vf_rst_cmd *req;
2598         struct hclge_desc desc;
2599
2600         req = (struct hclge_vf_rst_cmd *)desc.data;
2601         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2602         req->dest_vfid = func_id;
2603
2604         if (reset)
2605                 req->vf_rst = 0x1;
2606
2607         return hclge_cmd_send(&hdev->hw, &desc, 1);
2608 }
2609
2610 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2611 {
2612         int i;
2613
2614         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2615                 struct hclge_vport *vport = &hdev->vport[i];
2616                 int ret;
2617
2618                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2619                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2620                 if (ret) {
2621                         dev_err(&hdev->pdev->dev,
2622                                 "set vf(%d) rst failed %d!\n",
2623                                 vport->vport_id, ret);
2624                         return ret;
2625                 }
2626
2627                 if (!reset)
2628                         continue;
2629
2630                 /* Inform VF to process the reset.
2631                  * hclge_inform_reset_assert_to_vf may fail if VF
2632                  * driver is not loaded.
2633                  */
2634                 ret = hclge_inform_reset_assert_to_vf(vport);
2635                 if (ret)
2636                         dev_warn(&hdev->pdev->dev,
2637                                  "inform reset to vf(%d) failed %d!\n",
2638                                  vport->vport_id, ret);
2639         }
2640
2641         return 0;
2642 }
2643
2644 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2645 {
2646         struct hclge_desc desc;
2647         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2648         int ret;
2649
2650         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2651         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2652         req->fun_reset_vfid = func_id;
2653
2654         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2655         if (ret)
2656                 dev_err(&hdev->pdev->dev,
2657                         "send function reset cmd fail, status = %d\n", ret);
2658
2659         return ret;
2660 }
2661
2662 static void hclge_do_reset(struct hclge_dev *hdev)
2663 {
2664         struct pci_dev *pdev = hdev->pdev;
2665         u32 val;
2666
2667         switch (hdev->reset_type) {
2668         case HNAE3_GLOBAL_RESET:
2669                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2670                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2671                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2672                 dev_info(&pdev->dev, "Global Reset requested\n");
2673                 break;
2674         case HNAE3_CORE_RESET:
2675                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2676                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2677                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2678                 dev_info(&pdev->dev, "Core Reset requested\n");
2679                 break;
2680         case HNAE3_FUNC_RESET:
2681                 dev_info(&pdev->dev, "PF Reset requested\n");
2682                 /* schedule again to check later */
2683                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2684                 hclge_reset_task_schedule(hdev);
2685                 break;
2686         case HNAE3_FLR_RESET:
2687                 dev_info(&pdev->dev, "FLR requested\n");
2688                 /* schedule again to check later */
2689                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2690                 hclge_reset_task_schedule(hdev);
2691                 break;
2692         default:
2693                 dev_warn(&pdev->dev,
2694                          "Unsupported reset type: %d\n", hdev->reset_type);
2695                 break;
2696         }
2697 }
2698
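/* Return the highest-priority reset level pending in @addr and clear it
 * (together with any lower levels it supersedes) from the bitmap.
 */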
2699 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2700                                                    unsigned long *addr)
2701 {
2702         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2703
2704         /* first, resolve any unknown reset type to the known type(s) */
2705         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2706                 /* we will intentionally ignore any errors from this function
2707                  * as we will end up in *some* reset request in any case
2708                  */
2709                 hclge_handle_hw_msix_error(hdev, addr);
2710                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2711                 /* We deferred the clearing of the error event which caused the
2712                  * interrupt since it was not possible to do that in
2713                  * interrupt context (and this is the reason we introduced the
2714                  * new UNKNOWN reset type). Now that the errors have been
2715                  * handled and cleared in hardware, we can safely enable
2716                  * interrupts. This is an exception to the norm.
2717                  */
2718                 hclge_enable_vector(&hdev->misc_vector, true);
2719         }
2720
2721         /* return the highest priority reset level amongst all */
2722         if (test_bit(HNAE3_IMP_RESET, addr)) {
2723                 rst_level = HNAE3_IMP_RESET;
2724                 clear_bit(HNAE3_IMP_RESET, addr);
2725                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2726                 clear_bit(HNAE3_CORE_RESET, addr);
2727                 clear_bit(HNAE3_FUNC_RESET, addr);
2728         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2729                 rst_level = HNAE3_GLOBAL_RESET;
2730                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2731                 clear_bit(HNAE3_CORE_RESET, addr);
2732                 clear_bit(HNAE3_FUNC_RESET, addr);
2733         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2734                 rst_level = HNAE3_CORE_RESET;
2735                 clear_bit(HNAE3_CORE_RESET, addr);
2736                 clear_bit(HNAE3_FUNC_RESET, addr);
2737         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2738                 rst_level = HNAE3_FUNC_RESET;
2739                 clear_bit(HNAE3_FUNC_RESET, addr);
2740         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2741                 rst_level = HNAE3_FLR_RESET;
2742                 clear_bit(HNAE3_FLR_RESET, addr);
2743         }
2744
2745         return rst_level;
2746 }
2747
2748 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2749 {
2750         u32 clearval = 0;
2751
2752         switch (hdev->reset_type) {
2753         case HNAE3_IMP_RESET:
2754                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2755                 break;
2756         case HNAE3_GLOBAL_RESET:
2757                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2758                 break;
2759         case HNAE3_CORE_RESET:
2760                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2761                 break;
2762         default:
2763                 break;
2764         }
2765
2766         if (!clearval)
2767                 return;
2768
2769         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2770         hclge_enable_vector(&hdev->misc_vector, true);
2771 }
2772
2773 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2774 {
2775         int ret = 0;
2776
2777         switch (hdev->reset_type) {
2778         case HNAE3_FUNC_RESET:
2779                 /* fall through */
2780         case HNAE3_FLR_RESET:
2781                 ret = hclge_set_all_vf_rst(hdev, true);
2782                 break;
2783         default:
2784                 break;
2785         }
2786
2787         return ret;
2788 }
2789
2790 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2791 {
2792         u32 reg_val;
2793         int ret = 0;
2794
2795         switch (hdev->reset_type) {
2796         case HNAE3_FUNC_RESET:
2797                 /* There is no mechanism for PF to know if VF has stopped IO
2798                  * for now, just wait 100 ms for VF to stop IO
2799                  */
2800                 msleep(100);
2801                 ret = hclge_func_reset_cmd(hdev, 0);
2802                 if (ret) {
2803                         dev_err(&hdev->pdev->dev,
2804                                 "asserting function reset fail %d!\n", ret);
2805                         return ret;
2806                 }
2807
2808                 /* After performing PF reset, it is not necessary to do the
2809                  * mailbox handling or send any command to firmware, because
2810                  * any mailbox handling or command to firmware is only valid
2811                  * after hclge_cmd_init is called.
2812                  */
2813                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2814                 break;
2815         case HNAE3_FLR_RESET:
2816                 /* There is no mechanism for the PF to know if the VF has
2817                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
2818                  */
2819                 msleep(100);
2820                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2821                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2822                 break;
2823         case HNAE3_IMP_RESET:
2824                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2825                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2826                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2827                 break;
2828         default:
2829                 break;
2830         }
2831
2832         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2833
2834         return ret;
2835 }
2836
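/* Called when a reset attempt fails or times out. Returns true if the reset
 * task should be rescheduled (another reset is already pending, or the
 * hardware reset simply has not finished yet), and false if the failure is
 * final or will be retried at a higher level via reset_timer.
 */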
2837 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2838 {
2839 #define MAX_RESET_FAIL_CNT 5
2840 #define RESET_UPGRADE_DELAY_SEC 10
2841
2842         if (hdev->reset_pending) {
2843                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2844                          hdev->reset_pending);
2845                 return true;
2846         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2847                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2848                     BIT(HCLGE_IMP_RESET_BIT))) {
2849                 dev_info(&hdev->pdev->dev,
2850                          "reset failed because IMP Reset is pending\n");
2851                 hclge_clear_reset_cause(hdev);
2852                 return false;
2853         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2854                 hdev->reset_fail_cnt++;
2855                 if (is_timeout) {
2856                         set_bit(hdev->reset_type, &hdev->reset_pending);
2857                         dev_info(&hdev->pdev->dev,
2858                                  "re-schedule to wait for hw reset done\n");
2859                         return true;
2860                 }
2861
2862                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2863                 hclge_clear_reset_cause(hdev);
2864                 mod_timer(&hdev->reset_timer,
2865                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2866
2867                 return false;
2868         }
2869
2870         hclge_clear_reset_cause(hdev);
2871         dev_err(&hdev->pdev->dev, "Reset fail!\n");
2872         return false;
2873 }
2874
2875 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2876 {
2877         int ret = 0;
2878
2879         switch (hdev->reset_type) {
2880         case HNAE3_FUNC_RESET:
2881                 /* fall through */
2882         case HNAE3_FLR_RESET:
2883                 ret = hclge_set_all_vf_rst(hdev, false);
2884                 break;
2885         default:
2886                 break;
2887         }
2888
2889         return ret;
2890 }
2891
2892 static void hclge_reset(struct hclge_dev *hdev)
2893 {
2894         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2895         bool is_timeout = false;
2896         int ret;
2897
2898         /* Initialize ae_dev reset status as well, in case enet layer wants to
2899          * know if device is undergoing reset
2900          */
2901         ae_dev->reset_type = hdev->reset_type;
2902         hdev->reset_count++;
2903         /* perform reset of the stack & ae device for a client */
2904         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2905         if (ret)
2906                 goto err_reset;
2907
2908         ret = hclge_reset_prepare_down(hdev);
2909         if (ret)
2910                 goto err_reset;
2911
2912         rtnl_lock();
2913         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2914         if (ret)
2915                 goto err_reset_lock;
2916
2917         rtnl_unlock();
2918
2919         ret = hclge_reset_prepare_wait(hdev);
2920         if (ret)
2921                 goto err_reset;
2922
2923         if (hclge_reset_wait(hdev)) {
2924                 is_timeout = true;
2925                 goto err_reset;
2926         }
2927
2928         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2929         if (ret)
2930                 goto err_reset;
2931
2932         rtnl_lock();
2933         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2934         if (ret)
2935                 goto err_reset_lock;
2936
2937         ret = hclge_reset_ae_dev(hdev->ae_dev);
2938         if (ret)
2939                 goto err_reset_lock;
2940
2941         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2942         if (ret)
2943                 goto err_reset_lock;
2944
2945         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
2946         if (ret)
2947                 goto err_reset_lock;
2948
2949         hclge_clear_reset_cause(hdev);
2950
2951         ret = hclge_reset_prepare_up(hdev);
2952         if (ret)
2953                 goto err_reset_lock;
2954
2955         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2956         if (ret)
2957                 goto err_reset_lock;
2958
2959         rtnl_unlock();
2960
2961         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2962         if (ret)
2963                 goto err_reset;
2964
2965         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2966         if (ret)
2967                 goto err_reset;
2968
2969         hdev->last_reset_time = jiffies;
2970         hdev->reset_fail_cnt = 0;
2971         ae_dev->reset_type = HNAE3_NONE_RESET;
2972
2973         return;
2974
2975 err_reset_lock:
2976         rtnl_unlock();
2977 err_reset:
2978         if (hclge_reset_err_handle(hdev, is_timeout))
2979                 hclge_reset_task_schedule(hdev);
2980 }
2981
2982 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2983 {
2984         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2985         struct hclge_dev *hdev = ae_dev->priv;
2986
2987         /* We might end up getting called broadly because of the 2 cases below:
2988          * 1. A recoverable error was conveyed through APEI and the only way
2989          *    to bring back normalcy is to reset.
2990          * 2. A new reset request from the stack due to timeout
2991          *
2992          * For the first case, the error event might not have an ae handle
2993          * available. Check if this is a new reset request and we are not here
2994          * just because the last reset attempt did not succeed and the watchdog
2995          * hit us again. We will know this if the last reset request did not
2996          * occur very recently (watchdog timer = 5*HZ, so check after a
2997          * sufficiently large time, say 4*5*HZ). In case of a new request we
2998          * reset the "reset level" to PF reset. And if it is a repeat of the
2999          * most recent reset request then we want to make sure we throttle the
3000          * reset request. Therefore, we will not allow it again before 3*HZ.
3001          */
3002         if (!handle)
3003                 handle = &hdev->vport[0].nic;
3004
3005         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3006                 return;
3007         else if (hdev->default_reset_request)
3008                 hdev->reset_level =
3009                         hclge_get_reset_level(hdev,
3010                                               &hdev->default_reset_request);
3011         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3012                 hdev->reset_level = HNAE3_FUNC_RESET;
3013
3014         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3015                  hdev->reset_level);
3016
3017         /* request reset & schedule reset task */
3018         set_bit(hdev->reset_level, &hdev->reset_request);
3019         hclge_reset_task_schedule(hdev);
3020
3021         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3022                 hdev->reset_level++;
3023 }
3024
3025 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3026                                         enum hnae3_reset_type rst_type)
3027 {
3028         struct hclge_dev *hdev = ae_dev->priv;
3029
3030         set_bit(rst_type, &hdev->default_reset_request);
3031 }
3032
3033 static void hclge_reset_timer(struct timer_list *t)
3034 {
3035         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3036
3037         dev_info(&hdev->pdev->dev,
3038                  "triggering global reset in reset timer\n");
3039         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3040         hclge_reset_event(hdev->pdev, NULL);
3041 }
3042
3043 static void hclge_reset_subtask(struct hclge_dev *hdev)
3044 {
3045         /* Check if there is any ongoing reset in the hardware. This status can
3046          * be checked from reset_pending. If there is one, we need to wait for
3047          * the hardware to complete the reset.
3048          *    a. If we are able to figure out in reasonable time that the
3049          *       hardware has fully reset, then we can proceed with the driver
3050          *       and client reset.
3051          *    b. else, we can come back later to check this status so
3052          *       re-schedule now.
3053          */
3054         hdev->last_reset_time = jiffies;
3055         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3056         if (hdev->reset_type != HNAE3_NONE_RESET)
3057                 hclge_reset(hdev);
3058
3059         /* check if we got any *new* reset requests to be honored */
3060         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3061         if (hdev->reset_type != HNAE3_NONE_RESET)
3062                 hclge_do_reset(hdev);
3063
3064         hdev->reset_type = HNAE3_NONE_RESET;
3065 }
3066
3067 static void hclge_reset_service_task(struct work_struct *work)
3068 {
3069         struct hclge_dev *hdev =
3070                 container_of(work, struct hclge_dev, rst_service_task);
3071
3072         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3073                 return;
3074
3075         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3076
3077         hclge_reset_subtask(hdev);
3078
3079         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3080 }
3081
3082 static void hclge_mailbox_service_task(struct work_struct *work)
3083 {
3084         struct hclge_dev *hdev =
3085                 container_of(work, struct hclge_dev, mbx_service_task);
3086
3087         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3088                 return;
3089
3090         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3091
3092         hclge_mbx_handler(hdev);
3093
3094         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3095 }
3096
3097 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3098 {
3099         int i;
3100
3101         /* start from vport 1 since the PF is always alive */
3102         for (i = 1; i < hdev->num_alloc_vport; i++) {
3103                 struct hclge_vport *vport = &hdev->vport[i];
3104
3105                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3106                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3107
3108                 /* If vf is not alive, set to default value */
3109                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3110                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3111         }
3112 }
3113
3114 static void hclge_service_task(struct work_struct *work)
3115 {
3116         struct hclge_dev *hdev =
3117                 container_of(work, struct hclge_dev, service_task);
3118
3119         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3120                 hclge_update_stats_for_all(hdev);
3121                 hdev->hw_stats.stats_timer = 0;
3122         }
3123
3124         hclge_update_speed_duplex(hdev);
3125         hclge_update_link_status(hdev);
3126         hclge_update_vport_alive(hdev);
3127         hclge_service_complete(hdev);
3128 }
3129
3130 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3131 {
3132         /* VF handle has no client */
3133         if (!handle->client)
3134                 return container_of(handle, struct hclge_vport, nic);
3135         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3136                 return container_of(handle, struct hclge_vport, roce);
3137         else
3138                 return container_of(handle, struct hclge_vport, nic);
3139 }
3140
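/* Allocate up to vector_num unused MSI-X vectors for this vport and fill
 * vector_info with the irq number and the per-vector register address. The
 * search starts from index 1 because vector 0 is kept for the misc interrupt.
 * Returns the number of vectors actually allocated.
 */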
3141 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3142                             struct hnae3_vector_info *vector_info)
3143 {
3144         struct hclge_vport *vport = hclge_get_vport(handle);
3145         struct hnae3_vector_info *vector = vector_info;
3146         struct hclge_dev *hdev = vport->back;
3147         int alloc = 0;
3148         int i, j;
3149
3150         vector_num = min(hdev->num_msi_left, vector_num);
3151
3152         for (j = 0; j < vector_num; j++) {
3153                 for (i = 1; i < hdev->num_msi; i++) {
3154                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3155                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3156                                 vector->io_addr = hdev->hw.io_base +
3157                                         HCLGE_VECTOR_REG_BASE +
3158                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3159                                         vport->vport_id *
3160                                         HCLGE_VECTOR_VF_OFFSET;
3161                                 hdev->vector_status[i] = vport->vport_id;
3162                                 hdev->vector_irq[i] = vector->vector;
3163
3164                                 vector++;
3165                                 alloc++;
3166
3167                                 break;
3168                         }
3169                 }
3170         }
3171         hdev->num_msi_left -= alloc;
3172         hdev->num_msi_used += alloc;
3173
3174         return alloc;
3175 }
3176
3177 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3178 {
3179         int i;
3180
3181         for (i = 0; i < hdev->num_msi; i++)
3182                 if (vector == hdev->vector_irq[i])
3183                         return i;
3184
3185         return -EINVAL;
3186 }
3187
3188 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3189 {
3190         struct hclge_vport *vport = hclge_get_vport(handle);
3191         struct hclge_dev *hdev = vport->back;
3192         int vector_id;
3193
3194         vector_id = hclge_get_vector_index(hdev, vector);
3195         if (vector_id < 0) {
3196                 dev_err(&hdev->pdev->dev,
3197                         "Get vector index fail. vector_id =%d\n", vector_id);
3198                 return vector_id;
3199         }
3200
3201         hclge_free_vector(hdev, vector_id);
3202
3203         return 0;
3204 }
3205
3206 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3207 {
3208         return HCLGE_RSS_KEY_SIZE;
3209 }
3210
3211 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3212 {
3213         return HCLGE_RSS_IND_TBL_SIZE;
3214 }
3215
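/* Program the RSS hash algorithm and hash key into hardware. The key is
 * written with three HCLGE_OPC_RSS_GENERIC_CONFIG commands, each carrying
 * HCLGE_RSS_HASH_KEY_NUM bytes (the last command carries the remainder).
 */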
3216 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3217                                   const u8 hfunc, const u8 *key)
3218 {
3219         struct hclge_rss_config_cmd *req;
3220         struct hclge_desc desc;
3221         int key_offset;
3222         int key_size;
3223         int ret;
3224
3225         req = (struct hclge_rss_config_cmd *)desc.data;
3226
3227         for (key_offset = 0; key_offset < 3; key_offset++) {
3228                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3229                                            false);
3230
3231                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3232                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3233
3234                 if (key_offset == 2)
3235                         key_size =
3236                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3237                 else
3238                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3239
3240                 memcpy(req->hash_key,
3241                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3242
3243                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3244                 if (ret) {
3245                         dev_err(&hdev->pdev->dev,
3246                                 "Configure RSS config fail, status = %d\n",
3247                                 ret);
3248                         return ret;
3249                 }
3250         }
3251         return 0;
3252 }
3253
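/* Write the RSS indirection table to hardware. The table is downloaded in
 * HCLGE_RSS_CFG_TBL_NUM commands of HCLGE_RSS_CFG_TBL_SIZE entries each.
 */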
3254 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3255 {
3256         struct hclge_rss_indirection_table_cmd *req;
3257         struct hclge_desc desc;
3258         int i, j;
3259         int ret;
3260
3261         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3262
3263         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3264                 hclge_cmd_setup_basic_desc
3265                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3266
3267                 req->start_table_index =
3268                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3269                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3270
3271                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3272                         req->rss_result[j] =
3273                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3274
3275                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3276                 if (ret) {
3277                         dev_err(&hdev->pdev->dev,
3278                                 "Configure rss indir table fail, status = %d\n",
3279                                 ret);
3280                         return ret;
3281                 }
3282         }
3283         return 0;
3284 }
3285
3286 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3287                                  u16 *tc_size, u16 *tc_offset)
3288 {
3289         struct hclge_rss_tc_mode_cmd *req;
3290         struct hclge_desc desc;
3291         int ret;
3292         int i;
3293
3294         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3295         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3296
3297         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3298                 u16 mode = 0;
3299
3300                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3301                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3302                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3303                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3304                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3305
3306                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3307         }
3308
3309         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3310         if (ret)
3311                 dev_err(&hdev->pdev->dev,
3312                         "Configure rss tc mode fail, status = %d\n", ret);
3313
3314         return ret;
3315 }
3316
3317 static void hclge_get_rss_type(struct hclge_vport *vport)
3318 {
3319         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3320             vport->rss_tuple_sets.ipv4_udp_en ||
3321             vport->rss_tuple_sets.ipv4_sctp_en ||
3322             vport->rss_tuple_sets.ipv6_tcp_en ||
3323             vport->rss_tuple_sets.ipv6_udp_en ||
3324             vport->rss_tuple_sets.ipv6_sctp_en)
3325                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3326         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3327                  vport->rss_tuple_sets.ipv6_fragment_en)
3328                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3329         else
3330                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3331 }
3332
3333 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3334 {
3335         struct hclge_rss_input_tuple_cmd *req;
3336         struct hclge_desc desc;
3337         int ret;
3338
3339         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3340
3341         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3342
3343         /* Get the tuple cfg from pf */
3344         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3345         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3346         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3347         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3348         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3349         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3350         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3351         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3352         hclge_get_rss_type(&hdev->vport[0]);
3353         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3354         if (ret)
3355                 dev_err(&hdev->pdev->dev,
3356                         "Configure rss input fail, status = %d\n", ret);
3357         return ret;
3358 }
3359
3360 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3361                          u8 *key, u8 *hfunc)
3362 {
3363         struct hclge_vport *vport = hclge_get_vport(handle);
3364         int i;
3365
3366         /* Get hash algorithm */
3367         if (hfunc) {
3368                 switch (vport->rss_algo) {
3369                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3370                         *hfunc = ETH_RSS_HASH_TOP;
3371                         break;
3372                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3373                         *hfunc = ETH_RSS_HASH_XOR;
3374                         break;
3375                 default:
3376                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3377                         break;
3378                 }
3379         }
3380
3381         /* Get the RSS Key required by the user */
3382         if (key)
3383                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3384
3385         /* Get indirect table */
3386         if (indir)
3387                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3388                         indir[i] =  vport->rss_indirection_tbl[i];
3389
3390         return 0;
3391 }
3392
3393 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3394                          const  u8 *key, const  u8 hfunc)
3395 {
3396         struct hclge_vport *vport = hclge_get_vport(handle);
3397         struct hclge_dev *hdev = vport->back;
3398         u8 hash_algo;
3399         int ret, i;
3400
3401         /* Set the RSS Hash Key if specified by the user */
3402         if (key) {
3403                 switch (hfunc) {
3404                 case ETH_RSS_HASH_TOP:
3405                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3406                         break;
3407                 case ETH_RSS_HASH_XOR:
3408                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3409                         break;
3410                 case ETH_RSS_HASH_NO_CHANGE:
3411                         hash_algo = vport->rss_algo;
3412                         break;
3413                 default:
3414                         return -EINVAL;
3415                 }
3416
3417                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3418                 if (ret)
3419                         return ret;
3420
3421                 /* Update the shadow RSS key with the user specified key */
3422                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3423                 vport->rss_algo = hash_algo;
3424         }
3425
3426         /* Update the shadow RSS table with user specified qids */
3427         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3428                 vport->rss_indirection_tbl[i] = indir[i];
3429
3430         /* Update the hardware */
3431         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3432 }
3433
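/* Translate the ethtool RXH_* flags in nfc->data into the driver's tuple
 * bits (source/destination IP and port). For SCTP flows the verification
 * tag bit is also enabled.
 */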
3434 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3435 {
3436         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3437
3438         if (nfc->data & RXH_L4_B_2_3)
3439                 hash_sets |= HCLGE_D_PORT_BIT;
3440         else
3441                 hash_sets &= ~HCLGE_D_PORT_BIT;
3442
3443         if (nfc->data & RXH_IP_SRC)
3444                 hash_sets |= HCLGE_S_IP_BIT;
3445         else
3446                 hash_sets &= ~HCLGE_S_IP_BIT;
3447
3448         if (nfc->data & RXH_IP_DST)
3449                 hash_sets |= HCLGE_D_IP_BIT;
3450         else
3451                 hash_sets &= ~HCLGE_D_IP_BIT;
3452
3453         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3454                 hash_sets |= HCLGE_V_TAG_BIT;
3455
3456         return hash_sets;
3457 }
3458
3459 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3460                                struct ethtool_rxnfc *nfc)
3461 {
3462         struct hclge_vport *vport = hclge_get_vport(handle);
3463         struct hclge_dev *hdev = vport->back;
3464         struct hclge_rss_input_tuple_cmd *req;
3465         struct hclge_desc desc;
3466         u8 tuple_sets;
3467         int ret;
3468
3469         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3470                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3471                 return -EINVAL;
3472
3473         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3474         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3475
3476         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3477         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3478         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3479         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3480         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3481         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3482         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3483         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3484
3485         tuple_sets = hclge_get_rss_hash_bits(nfc);
3486         switch (nfc->flow_type) {
3487         case TCP_V4_FLOW:
3488                 req->ipv4_tcp_en = tuple_sets;
3489                 break;
3490         case TCP_V6_FLOW:
3491                 req->ipv6_tcp_en = tuple_sets;
3492                 break;
3493         case UDP_V4_FLOW:
3494                 req->ipv4_udp_en = tuple_sets;
3495                 break;
3496         case UDP_V6_FLOW:
3497                 req->ipv6_udp_en = tuple_sets;
3498                 break;
3499         case SCTP_V4_FLOW:
3500                 req->ipv4_sctp_en = tuple_sets;
3501                 break;
3502         case SCTP_V6_FLOW:
3503                 if ((nfc->data & RXH_L4_B_0_1) ||
3504                     (nfc->data & RXH_L4_B_2_3))
3505                         return -EINVAL;
3506
3507                 req->ipv6_sctp_en = tuple_sets;
3508                 break;
3509         case IPV4_FLOW:
3510                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3511                 break;
3512         case IPV6_FLOW:
3513                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3514                 break;
3515         default:
3516                 return -EINVAL;
3517         }
3518
3519         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3520         if (ret) {
3521                 dev_err(&hdev->pdev->dev,
3522                         "Set rss tuple fail, status = %d\n", ret);
3523                 return ret;
3524         }
3525
3526         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3527         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3528         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3529         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3530         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3531         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3532         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3533         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3534         hclge_get_rss_type(vport);
3535         return 0;
3536 }
3537
3538 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3539                                struct ethtool_rxnfc *nfc)
3540 {
3541         struct hclge_vport *vport = hclge_get_vport(handle);
3542         u8 tuple_sets;
3543
3544         nfc->data = 0;
3545
3546         switch (nfc->flow_type) {
3547         case TCP_V4_FLOW:
3548                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3549                 break;
3550         case UDP_V4_FLOW:
3551                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3552                 break;
3553         case TCP_V6_FLOW:
3554                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3555                 break;
3556         case UDP_V6_FLOW:
3557                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3558                 break;
3559         case SCTP_V4_FLOW:
3560                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3561                 break;
3562         case SCTP_V6_FLOW:
3563                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3564                 break;
3565         case IPV4_FLOW:
3566         case IPV6_FLOW:
3567                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3568                 break;
3569         default:
3570                 return -EINVAL;
3571         }
3572
3573         if (!tuple_sets)
3574                 return 0;
3575
3576         if (tuple_sets & HCLGE_D_PORT_BIT)
3577                 nfc->data |= RXH_L4_B_2_3;
3578         if (tuple_sets & HCLGE_S_PORT_BIT)
3579                 nfc->data |= RXH_L4_B_0_1;
3580         if (tuple_sets & HCLGE_D_IP_BIT)
3581                 nfc->data |= RXH_IP_DST;
3582         if (tuple_sets & HCLGE_S_IP_BIT)
3583                 nfc->data |= RXH_IP_SRC;
3584
3585         return 0;
3586 }
3587
3588 static int hclge_get_tc_size(struct hnae3_handle *handle)
3589 {
3590         struct hclge_vport *vport = hclge_get_vport(handle);
3591         struct hclge_dev *hdev = vport->back;
3592
3593         return hdev->rss_size_max;
3594 }
3595
3596 int hclge_rss_init_hw(struct hclge_dev *hdev)
3597 {
3598         struct hclge_vport *vport = hdev->vport;
3599         u8 *rss_indir = vport[0].rss_indirection_tbl;
3600         u16 rss_size = vport[0].alloc_rss_size;
3601         u8 *key = vport[0].rss_hash_key;
3602         u8 hfunc = vport[0].rss_algo;
3603         u16 tc_offset[HCLGE_MAX_TC_NUM];
3604         u16 tc_valid[HCLGE_MAX_TC_NUM];
3605         u16 tc_size[HCLGE_MAX_TC_NUM];
3606         u16 roundup_size;
3607         int i, ret;
3608
3609         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3610         if (ret)
3611                 return ret;
3612
3613         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3614         if (ret)
3615                 return ret;
3616
3617         ret = hclge_set_rss_input_tuple(hdev);
3618         if (ret)
3619                 return ret;
3620
3621         /* Each TC has the same queue size, and the tc_size set to hardware is
3622          * the log2 of the roundup power of two of rss_size; the actual queue
3623          * size is limited by the indirection table.
3624          */
3625         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3626                 dev_err(&hdev->pdev->dev,
3627                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3628                         rss_size);
3629                 return -EINVAL;
3630         }
3631
3632         roundup_size = roundup_pow_of_two(rss_size);
3633         roundup_size = ilog2(roundup_size);
3634
3635         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3636                 tc_valid[i] = 0;
3637
3638                 if (!(hdev->hw_tc_map & BIT(i)))
3639                         continue;
3640
3641                 tc_valid[i] = 1;
3642                 tc_size[i] = roundup_size;
3643                 tc_offset[i] = rss_size * i;
3644         }
3645
3646         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3647 }
3648
3649 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3650 {
3651         struct hclge_vport *vport = hdev->vport;
3652         int i, j;
3653
3654         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3655                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3656                         vport[j].rss_indirection_tbl[i] =
3657                                 i % vport[j].alloc_rss_size;
3658         }
3659 }
3660
3661 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3662 {
3663         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3664         struct hclge_vport *vport = hdev->vport;
3665
3666         if (hdev->pdev->revision >= 0x21)
3667                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3668
3669         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3670                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3671                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3672                 vport[i].rss_tuple_sets.ipv4_udp_en =
3673                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3674                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3675                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3676                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3677                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3678                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3679                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3680                 vport[i].rss_tuple_sets.ipv6_udp_en =
3681                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3682                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3683                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3684                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3685                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3686
3687                 vport[i].rss_algo = rss_algo;
3688
3689                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3690                        HCLGE_RSS_KEY_SIZE);
3691         }
3692
3693         hclge_rss_indir_init_cfg(hdev);
3694 }
3695
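/* Map (en == true) or unmap (en == false) every ring in ring_chain to the
 * given vector. Ring/vector bindings are sent to firmware in batches of
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries per command descriptor.
 */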
3696 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3697                                 int vector_id, bool en,
3698                                 struct hnae3_ring_chain_node *ring_chain)
3699 {
3700         struct hclge_dev *hdev = vport->back;
3701         struct hnae3_ring_chain_node *node;
3702         struct hclge_desc desc;
3703         struct hclge_ctrl_vector_chain_cmd *req
3704                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3705         enum hclge_cmd_status status;
3706         enum hclge_opcode_type op;
3707         u16 tqp_type_and_id;
3708         int i;
3709
3710         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3711         hclge_cmd_setup_basic_desc(&desc, op, false);
3712         req->int_vector_id = vector_id;
3713
3714         i = 0;
3715         for (node = ring_chain; node; node = node->next) {
3716                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3717                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3718                                 HCLGE_INT_TYPE_S,
3719                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3720                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3721                                 HCLGE_TQP_ID_S, node->tqp_index);
3722                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3723                                 HCLGE_INT_GL_IDX_S,
3724                                 hnae3_get_field(node->int_gl_idx,
3725                                                 HNAE3_RING_GL_IDX_M,
3726                                                 HNAE3_RING_GL_IDX_S));
3727                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3728                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3729                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3730                         req->vfid = vport->vport_id;
3731
3732                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
3733                         if (status) {
3734                                 dev_err(&hdev->pdev->dev,
3735                                         "Map TQP fail, status is %d.\n",
3736                                         status);
3737                                 return -EIO;
3738                         }
3739                         i = 0;
3740
3741                         hclge_cmd_setup_basic_desc(&desc,
3742                                                    op,
3743                                                    false);
3744                         req->int_vector_id = vector_id;
3745                 }
3746         }
3747
3748         if (i > 0) {
3749                 req->int_cause_num = i;
3750                 req->vfid = vport->vport_id;
3751                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3752                 if (status) {
3753                         dev_err(&hdev->pdev->dev,
3754                                 "Map TQP fail, status is %d.\n", status);
3755                         return -EIO;
3756                 }
3757         }
3758
3759         return 0;
3760 }
3761
3762 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3763                                     int vector,
3764                                     struct hnae3_ring_chain_node *ring_chain)
3765 {
3766         struct hclge_vport *vport = hclge_get_vport(handle);
3767         struct hclge_dev *hdev = vport->back;
3768         int vector_id;
3769
3770         vector_id = hclge_get_vector_index(hdev, vector);
3771         if (vector_id < 0) {
3772                 dev_err(&hdev->pdev->dev,
3773                         "Get vector index fail. vector_id =%d\n", vector_id);
3774                 return vector_id;
3775         }
3776
3777         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3778 }
3779
3780 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3781                                        int vector,
3782                                        struct hnae3_ring_chain_node *ring_chain)
3783 {
3784         struct hclge_vport *vport = hclge_get_vport(handle);
3785         struct hclge_dev *hdev = vport->back;
3786         int vector_id, ret;
3787
3788         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3789                 return 0;
3790
3791         vector_id = hclge_get_vector_index(hdev, vector);
3792         if (vector_id < 0) {
3793                 dev_err(&handle->pdev->dev,
3794                         "Get vector index fail. ret =%d\n", vector_id);
3795                 return vector_id;
3796         }
3797
3798         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3799         if (ret)
3800                 dev_err(&handle->pdev->dev,
3801                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3802                         vector_id,
3803                         ret);
3804
3805         return ret;
3806 }
3807
3808 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3809                                struct hclge_promisc_param *param)
3810 {
3811         struct hclge_promisc_cfg_cmd *req;
3812         struct hclge_desc desc;
3813         int ret;
3814
3815         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3816
3817         req = (struct hclge_promisc_cfg_cmd *)desc.data;
3818         req->vf_id = param->vf_id;
3819
3820         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3821          * pdev revision(0x20); newer revisions support them. Setting these
3822          * two fields does not return an error when the driver sends the
3823          * command to the firmware in revision(0x20).
3824          */
3825         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3826                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3827
3828         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3829         if (ret)
3830                 dev_err(&hdev->pdev->dev,
3831                         "Set promisc mode fail, status is %d.\n", ret);
3832
3833         return ret;
3834 }
3835
3836 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3837                               bool en_mc, bool en_bc, int vport_id)
3838 {
3839         if (!param)
3840                 return;
3841
3842         memset(param, 0, sizeof(struct hclge_promisc_param));
3843         if (en_uc)
3844                 param->enable = HCLGE_PROMISC_EN_UC;
3845         if (en_mc)
3846                 param->enable |= HCLGE_PROMISC_EN_MC;
3847         if (en_bc)
3848                 param->enable |= HCLGE_PROMISC_EN_BC;
3849         param->vf_id = vport_id;
3850 }
3851
3852 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3853                                   bool en_mc_pmc)
3854 {
3855         struct hclge_vport *vport = hclge_get_vport(handle);
3856         struct hclge_dev *hdev = vport->back;
3857         struct hclge_promisc_param param;
3858         bool en_bc_pmc = true;
3859
3860         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3861          * is always bypassed. So broadcast promisc should be disabled until
3862          * the user enables promisc mode
3863          */
3864         if (handle->pdev->revision == 0x20)
3865                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3866
3867         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3868                                  vport->vport_id);
3869         return hclge_cmd_set_promisc_mode(hdev, &param);
3870 }
3871
3872 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3873 {
3874         struct hclge_get_fd_mode_cmd *req;
3875         struct hclge_desc desc;
3876         int ret;
3877
3878         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3879
3880         req = (struct hclge_get_fd_mode_cmd *)desc.data;
3881
3882         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3883         if (ret) {
3884                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3885                 return ret;
3886         }
3887
3888         *fd_mode = req->mode;
3889
3890         return ret;
3891 }
3892
3893 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3894                                    u32 *stage1_entry_num,
3895                                    u32 *stage2_entry_num,
3896                                    u16 *stage1_counter_num,
3897                                    u16 *stage2_counter_num)
3898 {
3899         struct hclge_get_fd_allocation_cmd *req;
3900         struct hclge_desc desc;
3901         int ret;
3902
3903         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3904
3905         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3906
3907         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3908         if (ret) {
3909                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3910                         ret);
3911                 return ret;
3912         }
3913
3914         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3915         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3916         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3917         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3918
3919         return ret;
3920 }
3921
3922 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3923 {
3924         struct hclge_set_fd_key_config_cmd *req;
3925         struct hclge_fd_key_cfg *stage;
3926         struct hclge_desc desc;
3927         int ret;
3928
3929         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3930
3931         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3932         stage = &hdev->fd_cfg.key_cfg[stage_num];
3933         req->stage = stage_num;
3934         req->key_select = stage->key_sel;
3935         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3936         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3937         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3938         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3939         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3940         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3941
3942         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3943         if (ret)
3944                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3945
3946         return ret;
3947 }
3948
3949 static int hclge_init_fd_config(struct hclge_dev *hdev)
3950 {
3951 #define LOW_2_WORDS             0x03
3952         struct hclge_fd_key_cfg *key_cfg;
3953         int ret;
3954
3955         if (!hnae3_dev_fd_supported(hdev))
3956                 return 0;
3957
3958         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3959         if (ret)
3960                 return ret;
3961
3962         switch (hdev->fd_cfg.fd_mode) {
3963         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3964                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3965                 break;
3966         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3967                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3968                 break;
3969         default:
3970                 dev_err(&hdev->pdev->dev,
3971                         "Unsupported flow director mode %d\n",
3972                         hdev->fd_cfg.fd_mode);
3973                 return -EOPNOTSUPP;
3974         }
3975
3976         hdev->fd_cfg.fd_en = true;
3977         hdev->fd_cfg.proto_support =
3978                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3979                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3980         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3981         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3982         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3983         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3984         key_cfg->outer_sipv6_word_en = 0;
3985         key_cfg->outer_dipv6_word_en = 0;
3986
3987         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3988                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3989                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3990                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3991
3992         /* If the max 400 bit key is used, we can support tuples for ether type */
3993         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3994                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3995                 key_cfg->tuple_active |=
3996                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3997         }
3998
3999         /* roce_type is used to filter roce frames
4000          * dst_vport is used to specify the rule
4001          */
4002         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4003
4004         ret = hclge_get_fd_allocation(hdev,
4005                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4006                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4007                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4008                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4009         if (ret)
4010                 return ret;
4011
4012         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4013 }
4014
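/* Write one flow director TCAM entry. The key is larger than a single
 * command descriptor, so it is split across three chained descriptors.
 * With sel_x the x-part of the key is written, otherwise the y-part; the
 * entry valid bit is taken from is_add when writing the x-part.
 */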
4015 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4016                                 int loc, u8 *key, bool is_add)
4017 {
4018         struct hclge_fd_tcam_config_1_cmd *req1;
4019         struct hclge_fd_tcam_config_2_cmd *req2;
4020         struct hclge_fd_tcam_config_3_cmd *req3;
4021         struct hclge_desc desc[3];
4022         int ret;
4023
4024         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4025         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4026         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4027         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4028         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4029
4030         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4031         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4032         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4033
4034         req1->stage = stage;
4035         req1->xy_sel = sel_x ? 1 : 0;
4036         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4037         req1->index = cpu_to_le32(loc);
4038         req1->entry_vld = sel_x ? is_add : 0;
4039
4040         if (key) {
4041                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4042                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4043                        sizeof(req2->tcam_data));
4044                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4045                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4046         }
4047
4048         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4049         if (ret)
4050                 dev_err(&hdev->pdev->dev,
4051                         "config tcam key fail, ret=%d\n",
4052                         ret);
4053
4054         return ret;
4055 }
4056
4057 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4058                               struct hclge_fd_ad_data *action)
4059 {
4060         struct hclge_fd_ad_config_cmd *req;
4061         struct hclge_desc desc;
4062         u64 ad_data = 0;
4063         int ret;
4064
4065         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4066
4067         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4068         req->index = cpu_to_le32(loc);
4069         req->stage = stage;
4070
4071         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4072                       action->write_rule_id_to_bd);
4073         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4074                         action->rule_id);
4075         ad_data <<= 32;
4076         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4077         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4078                       action->forward_to_direct_queue);
4079         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4080                         action->queue_id);
4081         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4082         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4083                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4084         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4085         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4086                         action->counter_id);
4087
4088         req->ad_data = cpu_to_le64(ad_data);
4089         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4090         if (ret)
4091                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4092
4093         return ret;
4094 }
4095
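/* Convert one tuple of the rule into its TCAM x/y representation using
 * calc_x()/calc_y() on the tuple value and mask. Returns true if the tuple
 * occupies space in the key (even when it is unused and therefore left as
 * zero), false if it does not contribute to the key at all.
 */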
4096 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4097                                    struct hclge_fd_rule *rule)
4098 {
4099         u16 tmp_x_s, tmp_y_s;
4100         u32 tmp_x_l, tmp_y_l;
4101         int i;
4102
4103         if (rule->unused_tuple & tuple_bit)
4104                 return true;
4105
4106         switch (tuple_bit) {
4107         case 0:
4108                 return false;
4109         case BIT(INNER_DST_MAC):
4110                 for (i = 0; i < 6; i++) {
4111                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4112                                rule->tuples_mask.dst_mac[i]);
4113                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4114                                rule->tuples_mask.dst_mac[i]);
4115                 }
4116
4117                 return true;
4118         case BIT(INNER_SRC_MAC):
4119                 for (i = 0; i < 6; i++) {
4120                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4121                                rule->tuples_mask.src_mac[i]);
4122                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4123                                rule->tuples_mask.src_mac[i]);
4124                 }
4125
4126                 return true;
4127         case BIT(INNER_VLAN_TAG_FST):
4128                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4129                        rule->tuples_mask.vlan_tag1);
4130                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4131                        rule->tuples_mask.vlan_tag1);
4132                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4133                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4134
4135                 return true;
4136         case BIT(INNER_ETH_TYPE):
4137                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4138                        rule->tuples_mask.ether_proto);
4139                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4140                        rule->tuples_mask.ether_proto);
4141                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4142                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4143
4144                 return true;
4145         case BIT(INNER_IP_TOS):
4146                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4147                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4148
4149                 return true;
4150         case BIT(INNER_IP_PROTO):
4151                 calc_x(*key_x, rule->tuples.ip_proto,
4152                        rule->tuples_mask.ip_proto);
4153                 calc_y(*key_y, rule->tuples.ip_proto,
4154                        rule->tuples_mask.ip_proto);
4155
4156                 return true;
4157         case BIT(INNER_SRC_IP):
4158                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4159                        rule->tuples_mask.src_ip[3]);
4160                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4161                        rule->tuples_mask.src_ip[3]);
4162                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4163                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4164
4165                 return true;
4166         case BIT(INNER_DST_IP):
4167                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4168                        rule->tuples_mask.dst_ip[3]);
4169                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4170                        rule->tuples_mask.dst_ip[3]);
4171                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4172                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4173
4174                 return true;
4175         case BIT(INNER_SRC_PORT):
4176                 calc_x(tmp_x_s, rule->tuples.src_port,
4177                        rule->tuples_mask.src_port);
4178                 calc_y(tmp_y_s, rule->tuples.src_port,
4179                        rule->tuples_mask.src_port);
4180                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4181                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4182
4183                 return true;
4184         case BIT(INNER_DST_PORT):
4185                 calc_x(tmp_x_s, rule->tuples.dst_port,
4186                        rule->tuples_mask.dst_port);
4187                 calc_y(tmp_y_s, rule->tuples.dst_port,
4188                        rule->tuples_mask.dst_port);
4189                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4190                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4191
4192                 return true;
4193         default:
4194                 return false;
4195         }
4196 }
4197
4198 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4199                                  u8 vf_id, u8 network_port_id)
4200 {
4201         u32 port_number = 0;
4202
4203         if (port_type == HOST_PORT) {
4204                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4205                                 pf_id);
4206                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4207                                 vf_id);
4208                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4209         } else {
4210                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4211                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4212                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4213         }
4214
4215         return port_number;
4216 }
4217
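/* A descriptive comment added for clarity (not in the original source):
 * pack the active meta data fields (packet type and destination vport)
 * into a single word, left-align it within the meta data region and
 * derive the TCAM x/y patterns for it with a full mask.
 */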
4218 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4219                                        __le32 *key_x, __le32 *key_y,
4220                                        struct hclge_fd_rule *rule)
4221 {
4222         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4223         u8 cur_pos = 0, tuple_size, shift_bits;
4224         int i;
4225
4226         for (i = 0; i < MAX_META_DATA; i++) {
4227                 tuple_size = meta_data_key_info[i].key_length;
4228                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4229
4230                 switch (tuple_bit) {
4231                 case BIT(ROCE_TYPE):
4232                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4233                         cur_pos += tuple_size;
4234                         break;
4235                 case BIT(DST_VPORT):
4236                         port_number = hclge_get_port_number(HOST_PORT, 0,
4237                                                             rule->vf_id, 0);
4238                         hnae3_set_field(meta_data,
4239                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4240                                         cur_pos, port_number);
4241                         cur_pos += tuple_size;
4242                         break;
4243                 default:
4244                         break;
4245                 }
4246         }
4247
4248         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4249         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4250         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4251
4252         *key_x = cpu_to_le32(tmp_x << shift_bits);
4253         *key_y = cpu_to_le32(tmp_y << shift_bits);
4254 }
4255
4256 /* A complete key consists of a meta data key and a tuple key.
4257  * The meta data key is stored in the MSB region and the tuple key is
4258  * stored in the LSB region; unused bits are filled with 0.
4259  */
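/* An illustrative comment added for clarity (not in the original source):
 * for example, with a 400 bit hardware key and a 32 bit meta data region
 * (illustrative values only; the real sizes come from hdev->fd_cfg), the
 * tuple key occupies bytes 0..45 of key_x/key_y and the meta data the
 * last 4 bytes.
 */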
4260 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4261                             struct hclge_fd_rule *rule)
4262 {
4263         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4264         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4265         u8 *cur_key_x, *cur_key_y;
4266         int i, ret, tuple_size;
4267         u8 meta_data_region;
4268
4269         memset(key_x, 0, sizeof(key_x));
4270         memset(key_y, 0, sizeof(key_y));
4271         cur_key_x = key_x;
4272         cur_key_y = key_y;
4273
4274         for (i = 0; i < MAX_TUPLE; i++) {
4275                 bool tuple_valid;
4276                 u32 check_tuple;
4277
4278                 tuple_size = tuple_key_info[i].key_length / 8;
4279                 check_tuple = key_cfg->tuple_active & BIT(i);
4280
4281                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4282                                                      cur_key_y, rule);
4283                 if (tuple_valid) {
4284                         cur_key_x += tuple_size;
4285                         cur_key_y += tuple_size;
4286                 }
4287         }
4288
4289         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4290                         MAX_META_DATA_LENGTH / 8;
4291
4292         hclge_fd_convert_meta_data(key_cfg,
4293                                    (__le32 *)(key_x + meta_data_region),
4294                                    (__le32 *)(key_y + meta_data_region),
4295                                    rule);
4296
4297         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4298                                    true);
4299         if (ret) {
4300                 dev_err(&hdev->pdev->dev,
4301                         "fd key_y config fail, loc=%d, ret=%d\n",
4302                         rule->location, ret);
4303                 return ret;
4304         }
4305
4306         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4307                                    true);
4308         if (ret)
4309                 dev_err(&hdev->pdev->dev,
4310                         "fd key_x config fail, loc=%d, ret=%d\n",
4311                         rule->location, ret);
4312         return ret;
4313 }
4314
4315 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4316                                struct hclge_fd_rule *rule)
4317 {
4318         struct hclge_fd_ad_data ad_data;
4319
4320         ad_data.ad_id = rule->location;
4321
4322         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4323                 ad_data.drop_packet = true;
4324                 ad_data.forward_to_direct_queue = false;
4325                 ad_data.queue_id = 0;
4326         } else {
4327                 ad_data.drop_packet = false;
4328                 ad_data.forward_to_direct_queue = true;
4329                 ad_data.queue_id = rule->queue_id;
4330         }
4331
4332         ad_data.use_counter = false;
4333         ad_data.counter_id = 0;
4334
4335         ad_data.use_next_stage = false;
4336         ad_data.next_input_key = 0;
4337
4338         ad_data.write_rule_id_to_bd = true;
4339         ad_data.rule_id = rule->location;
4340
4341         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4342 }
4343
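/* A descriptive comment added for clarity (not in the original source):
 * validate the ethtool flow spec against what the flow director supports
 * and collect in *unused the tuple bits the spec leaves unspecified; the
 * caller keeps the result in rule->unused_tuple so it can be reported
 * back through hclge_get_fd_rule_info().
 */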
4344 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4345                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4346 {
4347         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4348         struct ethtool_usrip4_spec *usr_ip4_spec;
4349         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4350         struct ethtool_usrip6_spec *usr_ip6_spec;
4351         struct ethhdr *ether_spec;
4352
4353         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4354                 return -EINVAL;
4355
4356         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4357                 return -EOPNOTSUPP;
4358
4359         if ((fs->flow_type & FLOW_EXT) &&
4360             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4361                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4362                 return -EOPNOTSUPP;
4363         }
4364
4365         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4366         case SCTP_V4_FLOW:
4367         case TCP_V4_FLOW:
4368         case UDP_V4_FLOW:
4369                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4370                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4371
4372                 if (!tcp_ip4_spec->ip4src)
4373                         *unused |= BIT(INNER_SRC_IP);
4374
4375                 if (!tcp_ip4_spec->ip4dst)
4376                         *unused |= BIT(INNER_DST_IP);
4377
4378                 if (!tcp_ip4_spec->psrc)
4379                         *unused |= BIT(INNER_SRC_PORT);
4380
4381                 if (!tcp_ip4_spec->pdst)
4382                         *unused |= BIT(INNER_DST_PORT);
4383
4384                 if (!tcp_ip4_spec->tos)
4385                         *unused |= BIT(INNER_IP_TOS);
4386
4387                 break;
4388         case IP_USER_FLOW:
4389                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4390                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4391                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4392
4393                 if (!usr_ip4_spec->ip4src)
4394                         *unused |= BIT(INNER_SRC_IP);
4395
4396                 if (!usr_ip4_spec->ip4dst)
4397                         *unused |= BIT(INNER_DST_IP);
4398
4399                 if (!usr_ip4_spec->tos)
4400                         *unused |= BIT(INNER_IP_TOS);
4401
4402                 if (!usr_ip4_spec->proto)
4403                         *unused |= BIT(INNER_IP_PROTO);
4404
4405                 if (usr_ip4_spec->l4_4_bytes)
4406                         return -EOPNOTSUPP;
4407
4408                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4409                         return -EOPNOTSUPP;
4410
4411                 break;
4412         case SCTP_V6_FLOW:
4413         case TCP_V6_FLOW:
4414         case UDP_V6_FLOW:
4415                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4416                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4417                         BIT(INNER_IP_TOS);
4418
4419                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4420                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4421                         *unused |= BIT(INNER_SRC_IP);
4422
4423                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4424                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4425                         *unused |= BIT(INNER_DST_IP);
4426
4427                 if (!tcp_ip6_spec->psrc)
4428                         *unused |= BIT(INNER_SRC_PORT);
4429
4430                 if (!tcp_ip6_spec->pdst)
4431                         *unused |= BIT(INNER_DST_PORT);
4432
4433                 if (tcp_ip6_spec->tclass)
4434                         return -EOPNOTSUPP;
4435
4436                 break;
4437         case IPV6_USER_FLOW:
4438                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4439                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4440                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4441                         BIT(INNER_DST_PORT);
4442
4443                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4444                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4445                         *unused |= BIT(INNER_SRC_IP);
4446
4447                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4448                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4449                         *unused |= BIT(INNER_DST_IP);
4450
4451                 if (!usr_ip6_spec->l4_proto)
4452                         *unused |= BIT(INNER_IP_PROTO);
4453
4454                 if (usr_ip6_spec->tclass)
4455                         return -EOPNOTSUPP;
4456
4457                 if (usr_ip6_spec->l4_4_bytes)
4458                         return -EOPNOTSUPP;
4459
4460                 break;
4461         case ETHER_FLOW:
4462                 ether_spec = &fs->h_u.ether_spec;
4463                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4464                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4465                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4466
4467                 if (is_zero_ether_addr(ether_spec->h_source))
4468                         *unused |= BIT(INNER_SRC_MAC);
4469
4470                 if (is_zero_ether_addr(ether_spec->h_dest))
4471                         *unused |= BIT(INNER_DST_MAC);
4472
4473                 if (!ether_spec->h_proto)
4474                         *unused |= BIT(INNER_ETH_TYPE);
4475
4476                 break;
4477         default:
4478                 return -EOPNOTSUPP;
4479         }
4480
4481         if ((fs->flow_type & FLOW_EXT)) {
4482                 if (fs->h_ext.vlan_etype)
4483                         return -EOPNOTSUPP;
4484                 if (!fs->h_ext.vlan_tci)
4485                         *unused |= BIT(INNER_VLAN_TAG_FST);
4486
4487                 if (fs->m_ext.vlan_tci) {
4488                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4489                                 return -EINVAL;
4490                 }
4491         } else {
4492                 *unused |= BIT(INNER_VLAN_TAG_FST);
4493         }
4494
4495         if (fs->flow_type & FLOW_MAC_EXT) {
4496                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4497                         return -EOPNOTSUPP;
4498
4499                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4500                         *unused |= BIT(INNER_DST_MAC);
4501                 else
4502                         *unused &= ~(BIT(INNER_DST_MAC));
4503         }
4504
4505         return 0;
4506 }
4507
4508 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4509 {
4510         struct hclge_fd_rule *rule = NULL;
4511         struct hlist_node *node2;
4512
4513         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4514                 if (rule->location >= location)
4515                         break;
4516         }
4517
4518         return rule && rule->location == location;
4519 }
4520
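/* A descriptive comment added for clarity (not in the original source):
 * the rule list is kept sorted by rule location. This helper removes any
 * existing rule at @location and, when @is_add is true, links @new_rule
 * behind its predecessor (or at the list head).
 */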
4521 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4522                                      struct hclge_fd_rule *new_rule,
4523                                      u16 location,
4524                                      bool is_add)
4525 {
4526         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4527         struct hlist_node *node2;
4528
4529         if (is_add && !new_rule)
4530                 return -EINVAL;
4531
4532         hlist_for_each_entry_safe(rule, node2,
4533                                   &hdev->fd_rule_list, rule_node) {
4534                 if (rule->location >= location)
4535                         break;
4536                 parent = rule;
4537         }
4538
4539         if (rule && rule->location == location) {
4540                 hlist_del(&rule->rule_node);
4541                 kfree(rule);
4542                 hdev->hclge_fd_rule_num--;
4543
4544                 if (!is_add)
4545                         return 0;
4546
4547         } else if (!is_add) {
4548                 dev_err(&hdev->pdev->dev,
4549                         "delete fail, rule %d does not exist\n",
4550                         location);
4551                 return -EINVAL;
4552         }
4553
4554         INIT_HLIST_NODE(&new_rule->rule_node);
4555
4556         if (parent)
4557                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4558         else
4559                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4560
4561         hdev->hclge_fd_rule_num++;
4562
4563         return 0;
4564 }
4565
4566 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4567                               struct ethtool_rx_flow_spec *fs,
4568                               struct hclge_fd_rule *rule)
4569 {
4570         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4571
4572         switch (flow_type) {
4573         case SCTP_V4_FLOW:
4574         case TCP_V4_FLOW:
4575         case UDP_V4_FLOW:
4576                 rule->tuples.src_ip[3] =
4577                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4578                 rule->tuples_mask.src_ip[3] =
4579                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4580
4581                 rule->tuples.dst_ip[3] =
4582                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4583                 rule->tuples_mask.dst_ip[3] =
4584                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4585
4586                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4587                 rule->tuples_mask.src_port =
4588                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4589
4590                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4591                 rule->tuples_mask.dst_port =
4592                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4593
4594                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4595                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4596
4597                 rule->tuples.ether_proto = ETH_P_IP;
4598                 rule->tuples_mask.ether_proto = 0xFFFF;
4599
4600                 break;
4601         case IP_USER_FLOW:
4602                 rule->tuples.src_ip[3] =
4603                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4604                 rule->tuples_mask.src_ip[3] =
4605                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4606
4607                 rule->tuples.dst_ip[3] =
4608                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4609                 rule->tuples_mask.dst_ip[3] =
4610                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4611
4612                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4613                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4614
4615                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4616                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4617
4618                 rule->tuples.ether_proto = ETH_P_IP;
4619                 rule->tuples_mask.ether_proto = 0xFFFF;
4620
4621                 break;
4622         case SCTP_V6_FLOW:
4623         case TCP_V6_FLOW:
4624         case UDP_V6_FLOW:
4625                 be32_to_cpu_array(rule->tuples.src_ip,
4626                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4627                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4628                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4629
4630                 be32_to_cpu_array(rule->tuples.dst_ip,
4631                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4632                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4633                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4634
4635                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4636                 rule->tuples_mask.src_port =
4637                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4638
4639                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4640                 rule->tuples_mask.dst_port =
4641                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4642
4643                 rule->tuples.ether_proto = ETH_P_IPV6;
4644                 rule->tuples_mask.ether_proto = 0xFFFF;
4645
4646                 break;
4647         case IPV6_USER_FLOW:
4648                 be32_to_cpu_array(rule->tuples.src_ip,
4649                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4650                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4651                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4652
4653                 be32_to_cpu_array(rule->tuples.dst_ip,
4654                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4655                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4656                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4657
4658                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4659                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4660
4661                 rule->tuples.ether_proto = ETH_P_IPV6;
4662                 rule->tuples_mask.ether_proto = 0xFFFF;
4663
4664                 break;
4665         case ETHER_FLOW:
4666                 ether_addr_copy(rule->tuples.src_mac,
4667                                 fs->h_u.ether_spec.h_source);
4668                 ether_addr_copy(rule->tuples_mask.src_mac,
4669                                 fs->m_u.ether_spec.h_source);
4670
4671                 ether_addr_copy(rule->tuples.dst_mac,
4672                                 fs->h_u.ether_spec.h_dest);
4673                 ether_addr_copy(rule->tuples_mask.dst_mac,
4674                                 fs->m_u.ether_spec.h_dest);
4675
4676                 rule->tuples.ether_proto =
4677                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4678                 rule->tuples_mask.ether_proto =
4679                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4680
4681                 break;
4682         default:
4683                 return -EOPNOTSUPP;
4684         }
4685
4686         switch (flow_type) {
4687         case SCTP_V4_FLOW:
4688         case SCTP_V6_FLOW:
4689                 rule->tuples.ip_proto = IPPROTO_SCTP;
4690                 rule->tuples_mask.ip_proto = 0xFF;
4691                 break;
4692         case TCP_V4_FLOW:
4693         case TCP_V6_FLOW:
4694                 rule->tuples.ip_proto = IPPROTO_TCP;
4695                 rule->tuples_mask.ip_proto = 0xFF;
4696                 break;
4697         case UDP_V4_FLOW:
4698         case UDP_V6_FLOW:
4699                 rule->tuples.ip_proto = IPPROTO_UDP;
4700                 rule->tuples_mask.ip_proto = 0xFF;
4701                 break;
4702         default:
4703                 break;
4704         }
4705
4706         if ((fs->flow_type & FLOW_EXT)) {
4707                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4708                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4709         }
4710
4711         if (fs->flow_type & FLOW_MAC_EXT) {
4712                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4713                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4714         }
4715
4716         return 0;
4717 }
4718
4719 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4720                               struct ethtool_rxnfc *cmd)
4721 {
4722         struct hclge_vport *vport = hclge_get_vport(handle);
4723         struct hclge_dev *hdev = vport->back;
4724         u16 dst_vport_id = 0, q_index = 0;
4725         struct ethtool_rx_flow_spec *fs;
4726         struct hclge_fd_rule *rule;
4727         u32 unused = 0;
4728         u8 action;
4729         int ret;
4730
4731         if (!hnae3_dev_fd_supported(hdev))
4732                 return -EOPNOTSUPP;
4733
4734         if (!hdev->fd_cfg.fd_en) {
4735                 dev_warn(&hdev->pdev->dev,
4736                          "Please enable flow director first\n");
4737                 return -EOPNOTSUPP;
4738         }
4739
4740         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4741
4742         ret = hclge_fd_check_spec(hdev, fs, &unused);
4743         if (ret) {
4744                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4745                 return ret;
4746         }
4747
4748         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4749                 action = HCLGE_FD_ACTION_DROP_PACKET;
4750         } else {
4751                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4752                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4753                 u16 tqps;
4754
4755                 if (vf > hdev->num_req_vfs) {
4756                         dev_err(&hdev->pdev->dev,
4757                                 "Error: vf id (%d) > max vf num (%d)\n",
4758                                 vf, hdev->num_req_vfs);
4759                         return -EINVAL;
4760                 }
4761
4762                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4763                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4764
4765                 if (ring >= tqps) {
4766                         dev_err(&hdev->pdev->dev,
4767                                 "Error: queue id (%d) > max tqp num (%d)\n",
4768                                 ring, tqps - 1);
4769                         return -EINVAL;
4770                 }
4771
4772                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4773                 q_index = ring;
4774         }
4775
4776         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4777         if (!rule)
4778                 return -ENOMEM;
4779
4780         ret = hclge_fd_get_tuple(hdev, fs, rule);
4781         if (ret)
4782                 goto free_rule;
4783
4784         rule->flow_type = fs->flow_type;
4785
4786         rule->location = fs->location;
4787         rule->unused_tuple = unused;
4788         rule->vf_id = dst_vport_id;
4789         rule->queue_id = q_index;
4790         rule->action = action;
4791
4792         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4793         if (ret)
4794                 goto free_rule;
4795
4796         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4797         if (ret)
4798                 goto free_rule;
4799
4800         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4801         if (ret)
4802                 goto free_rule;
4803
4804         return ret;
4805
4806 free_rule:
4807         kfree(rule);
4808         return ret;
4809 }
4810
4811 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4812                               struct ethtool_rxnfc *cmd)
4813 {
4814         struct hclge_vport *vport = hclge_get_vport(handle);
4815         struct hclge_dev *hdev = vport->back;
4816         struct ethtool_rx_flow_spec *fs;
4817         int ret;
4818
4819         if (!hnae3_dev_fd_supported(hdev))
4820                 return -EOPNOTSUPP;
4821
4822         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4823
4824         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4825                 return -EINVAL;
4826
4827         if (!hclge_fd_rule_exist(hdev, fs->location)) {
4828                 dev_err(&hdev->pdev->dev,
4829                         "Delete fail, rule %d does not exist\n",
4830                         fs->location);
4831                 return -ENOENT;
4832         }
4833
4834         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4835                                    fs->location, NULL, false);
4836         if (ret)
4837                 return ret;
4838
4839         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4840                                          false);
4841 }
4842
4843 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4844                                      bool clear_list)
4845 {
4846         struct hclge_vport *vport = hclge_get_vport(handle);
4847         struct hclge_dev *hdev = vport->back;
4848         struct hclge_fd_rule *rule;
4849         struct hlist_node *node;
4850
4851         if (!hnae3_dev_fd_supported(hdev))
4852                 return;
4853
4854         if (clear_list) {
4855                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4856                                           rule_node) {
4857                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4858                                              rule->location, NULL, false);
4859                         hlist_del(&rule->rule_node);
4860                         kfree(rule);
4861                         hdev->hclge_fd_rule_num--;
4862                 }
4863         } else {
4864                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4865                                           rule_node)
4866                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4867                                              rule->location, NULL, false);
4868         }
4869 }
4870
4871 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4872 {
4873         struct hclge_vport *vport = hclge_get_vport(handle);
4874         struct hclge_dev *hdev = vport->back;
4875         struct hclge_fd_rule *rule;
4876         struct hlist_node *node;
4877         int ret;
4878
4879         /* Return 0 here, because the reset error handling checks this
4880          * return value; returning an error here would make the reset
4881          * process fail.
4882          */
4883         if (!hnae3_dev_fd_supported(hdev))
4884                 return 0;
4885
4886         /* if fd is disabled, the rules should not be restored during reset */
4887         if (!hdev->fd_cfg.fd_en)
4888                 return 0;
4889
4890         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4891                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4892                 if (!ret)
4893                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4894
4895                 if (ret) {
4896                         dev_warn(&hdev->pdev->dev,
4897                                  "Restore rule %d failed, remove it\n",
4898                                  rule->location);
4899                         hlist_del(&rule->rule_node);
4900                         kfree(rule);
4901                         hdev->hclge_fd_rule_num--;
4902                 }
4903         }
4904         return 0;
4905 }
4906
4907 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4908                                  struct ethtool_rxnfc *cmd)
4909 {
4910         struct hclge_vport *vport = hclge_get_vport(handle);
4911         struct hclge_dev *hdev = vport->back;
4912
4913         if (!hnae3_dev_fd_supported(hdev))
4914                 return -EOPNOTSUPP;
4915
4916         cmd->rule_cnt = hdev->hclge_fd_rule_num;
4917         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4918
4919         return 0;
4920 }
4921
4922 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4923                                   struct ethtool_rxnfc *cmd)
4924 {
4925         struct hclge_vport *vport = hclge_get_vport(handle);
4926         struct hclge_fd_rule *rule = NULL;
4927         struct hclge_dev *hdev = vport->back;
4928         struct ethtool_rx_flow_spec *fs;
4929         struct hlist_node *node2;
4930
4931         if (!hnae3_dev_fd_supported(hdev))
4932                 return -EOPNOTSUPP;
4933
4934         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4935
4936         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4937                 if (rule->location >= fs->location)
4938                         break;
4939         }
4940
4941         if (!rule || fs->location != rule->location)
4942                 return -ENOENT;
4943
4944         fs->flow_type = rule->flow_type;
4945         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4946         case SCTP_V4_FLOW:
4947         case TCP_V4_FLOW:
4948         case UDP_V4_FLOW:
4949                 fs->h_u.tcp_ip4_spec.ip4src =
4950                                 cpu_to_be32(rule->tuples.src_ip[3]);
4951                 fs->m_u.tcp_ip4_spec.ip4src =
4952                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4953                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4954
4955                 fs->h_u.tcp_ip4_spec.ip4dst =
4956                                 cpu_to_be32(rule->tuples.dst_ip[3]);
4957                 fs->m_u.tcp_ip4_spec.ip4dst =
4958                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
4959                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4960
4961                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4962                 fs->m_u.tcp_ip4_spec.psrc =
4963                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4964                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
4965
4966                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4967                 fs->m_u.tcp_ip4_spec.pdst =
4968                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4969                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4970
4971                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4972                 fs->m_u.tcp_ip4_spec.tos =
4973                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4974                                 0 : rule->tuples_mask.ip_tos;
4975
4976                 break;
4977         case IP_USER_FLOW:
4978                 fs->h_u.usr_ip4_spec.ip4src =
4979                                 cpu_to_be32(rule->tuples.src_ip[3]);
4980                 fs->m_u.usr_ip4_spec.ip4src =
4981                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4982                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4983
4984                 fs->h_u.usr_ip4_spec.ip4dst =
4985                                 cpu_to_be32(rule->tuples.dst_ip[3]);
4986                 fs->m_u.usr_ip4_spec.ip4dst =
4987                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
4988                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4989
4990                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4991                 fs->m_u.usr_ip4_spec.tos =
4992                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4993                                 0 : rule->tuples_mask.ip_tos;
4994
4995                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4996                 fs->m_u.usr_ip4_spec.proto =
4997                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4998                                 0 : rule->tuples_mask.ip_proto;
4999
5000                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5001
5002                 break;
5003         case SCTP_V6_FLOW:
5004         case TCP_V6_FLOW:
5005         case UDP_V6_FLOW:
5006                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5007                                   rule->tuples.src_ip, 4);
5008                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5009                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5010                 else
5011                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5012                                           rule->tuples_mask.src_ip, 4);
5013
5014                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5015                                   rule->tuples.dst_ip, 4);
5016                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5017                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5018                 else
5019                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5020                                           rule->tuples_mask.dst_ip, 4);
5021
5022                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5023                 fs->m_u.tcp_ip6_spec.psrc =
5024                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5025                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5026
5027                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5028                 fs->m_u.tcp_ip6_spec.pdst =
5029                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5030                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5031
5032                 break;
5033         case IPV6_USER_FLOW:
5034                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5035                                   rule->tuples.src_ip, 4);
5036                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5037                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5038                 else
5039                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5040                                           rule->tuples_mask.src_ip, 4);
5041
5042                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5043                                   rule->tuples.dst_ip, 4);
5044                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5045                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5046                 else
5047                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5048                                           rule->tuples_mask.dst_ip, 4);
5049
5050                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5051                 fs->m_u.usr_ip6_spec.l4_proto =
5052                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5053                                 0 : rule->tuples_mask.ip_proto;
5054
5055                 break;
5056         case ETHER_FLOW:
5057                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5058                                 rule->tuples.src_mac);
5059                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5060                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5061                 else
5062                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5063                                         rule->tuples_mask.src_mac);
5064
5065                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5066                                 rule->tuples.dst_mac);
5067                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5068                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5069                 else
5070                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5071                                         rule->tuples_mask.dst_mac);
5072
5073                 fs->h_u.ether_spec.h_proto =
5074                                 cpu_to_be16(rule->tuples.ether_proto);
5075                 fs->m_u.ether_spec.h_proto =
5076                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5077                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5078
5079                 break;
5080         default:
5081                 return -EOPNOTSUPP;
5082         }
5083
5084         if (fs->flow_type & FLOW_EXT) {
5085                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5086                 fs->m_ext.vlan_tci =
5087                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5088                                 cpu_to_be16(VLAN_VID_MASK) :
5089                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5090         }
5091
5092         if (fs->flow_type & FLOW_MAC_EXT) {
5093                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5094                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5095                         eth_zero_addr(fs->m_ext.h_dest);
5096                 else
5097                         ether_addr_copy(fs->m_ext.h_dest,
5098                                         rule->tuples_mask.dst_mac);
5099         }
5100
5101         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5102                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5103         } else {
5104                 u64 vf_id;
5105
5106                 fs->ring_cookie = rule->queue_id;
5107                 vf_id = rule->vf_id;
5108                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5109                 fs->ring_cookie |= vf_id;
5110         }
5111
5112         return 0;
5113 }
5114
5115 static int hclge_get_all_rules(struct hnae3_handle *handle,
5116                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5117 {
5118         struct hclge_vport *vport = hclge_get_vport(handle);
5119         struct hclge_dev *hdev = vport->back;
5120         struct hclge_fd_rule *rule;
5121         struct hlist_node *node2;
5122         int cnt = 0;
5123
5124         if (!hnae3_dev_fd_supported(hdev))
5125                 return -EOPNOTSUPP;
5126
5127         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5128
5129         hlist_for_each_entry_safe(rule, node2,
5130                                   &hdev->fd_rule_list, rule_node) {
5131                 if (cnt == cmd->rule_cnt)
5132                         return -EMSGSIZE;
5133
5134                 rule_locs[cnt] = rule->location;
5135                 cnt++;
5136         }
5137
5138         cmd->rule_cnt = cnt;
5139
5140         return 0;
5141 }
5142
5143 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5144 {
5145         struct hclge_vport *vport = hclge_get_vport(handle);
5146         struct hclge_dev *hdev = vport->back;
5147
5148         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5149                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5150 }
5151
5152 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5153 {
5154         struct hclge_vport *vport = hclge_get_vport(handle);
5155         struct hclge_dev *hdev = vport->back;
5156
5157         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5158 }
5159
5160 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5161 {
5162         struct hclge_vport *vport = hclge_get_vport(handle);
5163         struct hclge_dev *hdev = vport->back;
5164
5165         return hdev->reset_count;
5166 }
5167
5168 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5169 {
5170         struct hclge_vport *vport = hclge_get_vport(handle);
5171         struct hclge_dev *hdev = vport->back;
5172
5173         hdev->fd_cfg.fd_en = enable;
5174         if (!enable)
5175                 hclge_del_all_fd_entries(handle, false);
5176         else
5177                 hclge_restore_fd_entries(handle);
5178 }
5179
5180 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5181 {
5182         struct hclge_desc desc;
5183         struct hclge_config_mac_mode_cmd *req =
5184                 (struct hclge_config_mac_mode_cmd *)desc.data;
5185         u32 loop_en = 0;
5186         int ret;
5187
5188         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5189         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5190         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5191         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5192         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5193         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5194         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5195         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5196         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5197         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5198         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5199         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5200         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5201         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5202         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5203         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5204
5205         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5206         if (ret)
5207                 dev_err(&hdev->pdev->dev,
5208                         "mac enable fail, ret =%d.\n", ret);
5209 }
5210
5211 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5212 {
5213         struct hclge_config_mac_mode_cmd *req;
5214         struct hclge_desc desc;
5215         u32 loop_en;
5216         int ret;
5217
5218         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5219         /* 1 Read out the MAC mode config first */
5220         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5222         if (ret) {
5223                 dev_err(&hdev->pdev->dev,
5224                         "mac loopback get fail, ret =%d.\n", ret);
5225                 return ret;
5226         }
5227
5228         /* 2 Then set up the loopback flag */
5229         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5230         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5231         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5232         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5233
5234         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5235
5236         /* 3 Configure the MAC work mode with the loopback flag
5237          * and its original configuration parameters
5238          */
5239         hclge_cmd_reuse_desc(&desc, false);
5240         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5241         if (ret)
5242                 dev_err(&hdev->pdev->dev,
5243                         "mac loopback set fail, ret =%d.\n", ret);
5244         return ret;
5245 }
5246
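/* A descriptive comment added for clarity (not in the original source):
 * enable or disable the serdes internal loopback through the firmware,
 * poll until the command completes, then reconfigure the MAC and wait
 * for the link to reach the expected state.
 */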
5247 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5248                                      enum hnae3_loop loop_mode)
5249 {
5250 #define HCLGE_SERDES_RETRY_MS   10
5251 #define HCLGE_SERDES_RETRY_NUM  100
5252
5253 #define HCLGE_MAC_LINK_STATUS_MS   20
5254 #define HCLGE_MAC_LINK_STATUS_NUM  10
5255 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5256 #define HCLGE_MAC_LINK_STATUS_UP   1
5257
5258         struct hclge_serdes_lb_cmd *req;
5259         struct hclge_desc desc;
5260         int mac_link_ret = 0;
5261         int ret, i = 0;
5262         u8 loop_mode_b;
5263
5264         req = (struct hclge_serdes_lb_cmd *)desc.data;
5265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5266
5267         switch (loop_mode) {
5268         case HNAE3_LOOP_SERIAL_SERDES:
5269                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5270                 break;
5271         case HNAE3_LOOP_PARALLEL_SERDES:
5272                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5273                 break;
5274         default:
5275                 dev_err(&hdev->pdev->dev,
5276                         "unsupported serdes loopback mode %d\n", loop_mode);
5277                 return -EOPNOTSUPP;
5278         }
5279
5280         if (en) {
5281                 req->enable = loop_mode_b;
5282                 req->mask = loop_mode_b;
5283                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5284         } else {
5285                 req->mask = loop_mode_b;
5286                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5287         }
5288
5289         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5290         if (ret) {
5291                 dev_err(&hdev->pdev->dev,
5292                         "serdes loopback set fail, ret = %d\n", ret);
5293                 return ret;
5294         }
5295
5296         do {
5297                 msleep(HCLGE_SERDES_RETRY_MS);
5298                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5299                                            true);
5300                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5301                 if (ret) {
5302                         dev_err(&hdev->pdev->dev,
5303                                 "serdes loopback get, ret = %d\n", ret);
5304                         return ret;
5305                 }
5306         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5307                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5308
5309         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5310                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5311                 return -EBUSY;
5312         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5313                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5314                 return -EIO;
5315         }
5316
5317         hclge_cfg_mac_mode(hdev, en);
5318
5319         i = 0;
5320         do {
5321                 /* serdes internal loopback, independent of the network cable. */
5322                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5323                 ret = hclge_get_mac_link_status(hdev);
5324                 if (ret == mac_link_ret)
5325                         return 0;
5326         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5327
5328         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5329
5330         return -EBUSY;
5331 }
5332
5333 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5334                             int stream_id, bool enable)
5335 {
5336         struct hclge_desc desc;
5337         struct hclge_cfg_com_tqp_queue_cmd *req =
5338                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5339         int ret;
5340
5341         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5342         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5343         req->stream_id = cpu_to_le16(stream_id);
5344         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5345
5346         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5347         if (ret)
5348                 dev_err(&hdev->pdev->dev,
5349                         "Tqp enable fail, status =%d.\n", ret);
5350         return ret;
5351 }
5352
5353 static int hclge_set_loopback(struct hnae3_handle *handle,
5354                               enum hnae3_loop loop_mode, bool en)
5355 {
5356         struct hclge_vport *vport = hclge_get_vport(handle);
5357         struct hnae3_knic_private_info *kinfo;
5358         struct hclge_dev *hdev = vport->back;
5359         int i, ret;
5360
5361         switch (loop_mode) {
5362         case HNAE3_LOOP_APP:
5363                 ret = hclge_set_app_loopback(hdev, en);
5364                 break;
5365         case HNAE3_LOOP_SERIAL_SERDES:
5366         case HNAE3_LOOP_PARALLEL_SERDES:
5367                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5368                 break;
5369         default:
5370                 ret = -EOPNOTSUPP;
5371                 dev_err(&hdev->pdev->dev,
5372                         "loop_mode %d is not supported\n", loop_mode);
5373                 break;
5374         }
5375
5376         if (ret)
5377                 return ret;
5378
5379         kinfo = &vport->nic.kinfo;
5380         for (i = 0; i < kinfo->num_tqps; i++) {
5381                 ret = hclge_tqp_enable(hdev, i, 0, en);
5382                 if (ret)
5383                         return ret;
5384         }
5385
5386         return 0;
5387 }
5388
5389 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5390 {
5391         struct hclge_vport *vport = hclge_get_vport(handle);
5392         struct hnae3_knic_private_info *kinfo;
5393         struct hnae3_queue *queue;
5394         struct hclge_tqp *tqp;
5395         int i;
5396
5397         kinfo = &vport->nic.kinfo;
5398         for (i = 0; i < kinfo->num_tqps; i++) {
5399                 queue = handle->kinfo.tqp[i];
5400                 tqp = container_of(queue, struct hclge_tqp, q);
5401                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5402         }
5403 }
5404
5405 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5406 {
5407         struct hclge_vport *vport = hclge_get_vport(handle);
5408         struct hclge_dev *hdev = vport->back;
5409
5410         if (enable) {
5411                 mod_timer(&hdev->service_timer, jiffies + HZ);
5412         } else {
5413                 del_timer_sync(&hdev->service_timer);
5414                 cancel_work_sync(&hdev->service_task);
5415                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5416         }
5417 }
5418
5419 static int hclge_ae_start(struct hnae3_handle *handle)
5420 {
5421         struct hclge_vport *vport = hclge_get_vport(handle);
5422         struct hclge_dev *hdev = vport->back;
5423
5424         /* mac enable */
5425         hclge_cfg_mac_mode(hdev, true);
5426         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5427         hdev->hw.mac.link = 0;
5428
5429         /* reset tqp stats */
5430         hclge_reset_tqp_stats(handle);
5431
5432         hclge_mac_start_phy(hdev);
5433
5434         return 0;
5435 }
5436
5437 static void hclge_ae_stop(struct hnae3_handle *handle)
5438 {
5439         struct hclge_vport *vport = hclge_get_vport(handle);
5440         struct hclge_dev *hdev = vport->back;
5441         int i;
5442
5443         set_bit(HCLGE_STATE_DOWN, &hdev->state);
5444
5445         /* If it is not a PF reset, the firmware will disable the MAC,
5446          * so we only need to stop the PHY here.
5447          */
5448         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5449             hdev->reset_type != HNAE3_FUNC_RESET) {
5450                 hclge_mac_stop_phy(hdev);
5451                 return;
5452         }
5453
5454         for (i = 0; i < handle->kinfo.num_tqps; i++)
5455                 hclge_reset_tqp(handle, i);
5456
5457         /* Mac disable */
5458         hclge_cfg_mac_mode(hdev, false);
5459
5460         hclge_mac_stop_phy(hdev);
5461
5462         /* reset tqp stats */
5463         hclge_reset_tqp_stats(handle);
5464         hclge_update_link_status(hdev);
5465 }
5466
5467 int hclge_vport_start(struct hclge_vport *vport)
5468 {
5469         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5470         vport->last_active_jiffies = jiffies;
5471         return 0;
5472 }
5473
5474 void hclge_vport_stop(struct hclge_vport *vport)
5475 {
5476         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5477 }
5478
5479 static int hclge_client_start(struct hnae3_handle *handle)
5480 {
5481         struct hclge_vport *vport = hclge_get_vport(handle);
5482
5483         return hclge_vport_start(vport);
5484 }
5485
5486 static void hclge_client_stop(struct hnae3_handle *handle)
5487 {
5488         struct hclge_vport *vport = hclge_get_vport(handle);
5489
5490         hclge_vport_stop(vport);
5491 }
5492
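/* A descriptive comment added for clarity (not in the original source):
 * translate the MAC/VLAN table command completion code into an errno:
 * for an ADD, 0/1 means success, 2 means unicast table overflow and
 * 3 means multicast table overflow; for REMOVE and LOOKUP, 1 means the
 * entry was not found.
 */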
5493 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5494                                          u16 cmdq_resp, u8  resp_code,
5495                                          enum hclge_mac_vlan_tbl_opcode op)
5496 {
5497         struct hclge_dev *hdev = vport->back;
5498         int return_status = -EIO;
5499
5500         if (cmdq_resp) {
5501                 dev_err(&hdev->pdev->dev,
5502                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5503                         cmdq_resp);
5504                 return -EIO;
5505         }
5506
5507         if (op == HCLGE_MAC_VLAN_ADD) {
5508                 if ((!resp_code) || (resp_code == 1)) {
5509                         return_status = 0;
5510                 } else if (resp_code == 2) {
5511                         return_status = -ENOSPC;
5512                         dev_err(&hdev->pdev->dev,
5513                                 "add mac addr failed for uc_overflow.\n");
5514                 } else if (resp_code == 3) {
5515                         return_status = -ENOSPC;
5516                         dev_err(&hdev->pdev->dev,
5517                                 "add mac addr failed for mc_overflow.\n");
5518                 } else {
5519                         dev_err(&hdev->pdev->dev,
5520                                 "add mac addr failed for undefined, code=%d.\n",
5521                                 resp_code);
5522                 }
5523         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5524                 if (!resp_code) {
5525                         return_status = 0;
5526                 } else if (resp_code == 1) {
5527                         return_status = -ENOENT;
5528                         dev_dbg(&hdev->pdev->dev,
5529                                 "remove mac addr failed for miss.\n");
5530                 } else {
5531                         dev_err(&hdev->pdev->dev,
5532                                 "remove mac addr failed for undefined, code=%d.\n",
5533                                 resp_code);
5534                 }
5535         } else if (op == HCLGE_MAC_VLAN_LKUP) {
5536                 if (!resp_code) {
5537                         return_status = 0;
5538                 } else if (resp_code == 1) {
5539                         return_status = -ENOENT;
5540                         dev_dbg(&hdev->pdev->dev,
5541                                 "lookup mac addr failed for miss.\n");
5542                 } else {
5543                         dev_err(&hdev->pdev->dev,
5544                                 "lookup mac addr failed for undefined, code=%d.\n",
5545                                 resp_code);
5546                 }
5547         } else {
5548                 return_status = -EINVAL;
5549                 dev_err(&hdev->pdev->dev,
5550                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5551                         op);
5552         }
5553
5554         return return_status;
5555 }
5556
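/* A descriptive comment added for clarity (not in the original source):
 * the MAC/VLAN table entry carries a 256 bit function bitmap split across
 * two descriptors: desc[1] holds function ids 0-191 and desc[2] holds
 * 192-255. Set or clear the bit for @vfid according to @clr.
 */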
5557 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5558 {
5559         int word_num;
5560         int bit_num;
5561
5562         if (vfid > 255 || vfid < 0)
5563                 return -EIO;
5564
5565         if (vfid >= 0 && vfid <= 191) {
5566                 word_num = vfid / 32;
5567                 bit_num  = vfid % 32;
5568                 if (clr)
5569                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5570                 else
5571                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5572         } else {
5573                 word_num = (vfid - 192) / 32;
5574                 bit_num  = vfid % 32;
5575                 if (clr)
5576                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5577                 else
5578                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5579         }
5580
5581         return 0;
5582 }
5583
5584 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5585 {
5586 #define HCLGE_DESC_NUMBER 3
5587 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5588         int i, j;
5589
5590         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5591                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5592                         if (desc[i].data[j])
5593                                 return false;
5594
5595         return true;
5596 }
5597
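/* Pack the 6-byte MAC address into the command layout: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, both little-endian.
 */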
5598 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5599                                    const u8 *addr)
5600 {
5601         const unsigned char *mac_addr = addr;
5602         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5603                        (mac_addr[0]) | (mac_addr[1] << 8);
5604         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5605
5606         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5607         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5608 }
5609
5610 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5611                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
5612 {
5613         struct hclge_dev *hdev = vport->back;
5614         struct hclge_desc desc;
5615         u8 resp_code;
5616         u16 retval;
5617         int ret;
5618
5619         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5620
5621         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5622
5623         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5624         if (ret) {
5625                 dev_err(&hdev->pdev->dev,
5626                         "del mac addr failed for cmd_send, ret =%d.\n",
5627                         ret);
5628                 return ret;
5629         }
5630         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5631         retval = le16_to_cpu(desc.retval);
5632
5633         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5634                                              HCLGE_MAC_VLAN_REMOVE);
5635 }
5636
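/* Look up a mac_vlan table entry. Multicast lookups chain three descriptors
 * with HCLGE_CMD_FLAG_NEXT so the full VF bitmap can be read back; unicast
 * lookups need only a single descriptor.
 */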
5637 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5638                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
5639                                      struct hclge_desc *desc,
5640                                      bool is_mc)
5641 {
5642         struct hclge_dev *hdev = vport->back;
5643         u8 resp_code;
5644         u16 retval;
5645         int ret;
5646
5647         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5648         if (is_mc) {
5649                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5650                 memcpy(desc[0].data,
5651                        req,
5652                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5653                 hclge_cmd_setup_basic_desc(&desc[1],
5654                                            HCLGE_OPC_MAC_VLAN_ADD,
5655                                            true);
5656                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5657                 hclge_cmd_setup_basic_desc(&desc[2],
5658                                            HCLGE_OPC_MAC_VLAN_ADD,
5659                                            true);
5660                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5661         } else {
5662                 memcpy(desc[0].data,
5663                        req,
5664                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5665                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5666         }
5667         if (ret) {
5668                 dev_err(&hdev->pdev->dev,
5669                         "lookup mac addr failed for cmd_send, ret =%d.\n",
5670                         ret);
5671                 return ret;
5672         }
5673         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5674         retval = le16_to_cpu(desc[0].retval);
5675
5676         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5677                                              HCLGE_MAC_VLAN_LKUP);
5678 }
5679
5680 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5681                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
5682                                   struct hclge_desc *mc_desc)
5683 {
5684         struct hclge_dev *hdev = vport->back;
5685         int cfg_status;
5686         u8 resp_code;
5687         u16 retval;
5688         int ret;
5689
5690         if (!mc_desc) {
5691                 struct hclge_desc desc;
5692
5693                 hclge_cmd_setup_basic_desc(&desc,
5694                                            HCLGE_OPC_MAC_VLAN_ADD,
5695                                            false);
5696                 memcpy(desc.data, req,
5697                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5698                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5699                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5700                 retval = le16_to_cpu(desc.retval);
5701
5702                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5703                                                            resp_code,
5704                                                            HCLGE_MAC_VLAN_ADD);
5705         } else {
5706                 hclge_cmd_reuse_desc(&mc_desc[0], false);
5707                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5708                 hclge_cmd_reuse_desc(&mc_desc[1], false);
5709                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5710                 hclge_cmd_reuse_desc(&mc_desc[2], false);
5711                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5712                 memcpy(mc_desc[0].data, req,
5713                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5714                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5715                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5716                 retval = le16_to_cpu(mc_desc[0].retval);
5717
5718                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5719                                                            resp_code,
5720                                                            HCLGE_MAC_VLAN_ADD);
5721         }
5722
5723         if (ret) {
5724                 dev_err(&hdev->pdev->dev,
5725                         "add mac addr failed for cmd_send, ret =%d.\n",
5726                         ret);
5727                 return ret;
5728         }
5729
5730         return cfg_status;
5731 }
5732
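/* Unicast MAC vlan (UMV) space management: the allocated table space is split
 * into (num_req_vfs + 2) equal quotas; each function keeps one as its private
 * quota, and the extra quota plus the division remainder form the shared pool.
 */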
5733 static int hclge_init_umv_space(struct hclge_dev *hdev)
5734 {
5735         u16 allocated_size = 0;
5736         int ret;
5737
5738         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5739                                   true);
5740         if (ret)
5741                 return ret;
5742
5743         if (allocated_size < hdev->wanted_umv_size)
5744                 dev_warn(&hdev->pdev->dev,
5745                          "Alloc umv space failed, want %d, get %d\n",
5746                          hdev->wanted_umv_size, allocated_size);
5747
5748         mutex_init(&hdev->umv_mutex);
5749         hdev->max_umv_size = allocated_size;
5750         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5751         hdev->share_umv_size = hdev->priv_umv_size +
5752                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5753
5754         return 0;
5755 }
5756
5757 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5758 {
5759         int ret;
5760
5761         if (hdev->max_umv_size > 0) {
5762                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5763                                           false);
5764                 if (ret)
5765                         return ret;
5766                 hdev->max_umv_size = 0;
5767         }
5768         mutex_destroy(&hdev->umv_mutex);
5769
5770         return 0;
5771 }
5772
5773 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5774                                u16 *allocated_size, bool is_alloc)
5775 {
5776         struct hclge_umv_spc_alc_cmd *req;
5777         struct hclge_desc desc;
5778         int ret;
5779
5780         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5781         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5782         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5783         req->space_size = cpu_to_le32(space_size);
5784
5785         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5786         if (ret) {
5787                 dev_err(&hdev->pdev->dev,
5788                         "%s umv space failed for cmd_send, ret =%d\n",
5789                         is_alloc ? "allocate" : "free", ret);
5790                 return ret;
5791         }
5792
5793         if (is_alloc && allocated_size)
5794                 *allocated_size = le32_to_cpu(desc.data[1]);
5795
5796         return 0;
5797 }
5798
5799 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5800 {
5801         struct hclge_vport *vport;
5802         int i;
5803
5804         for (i = 0; i < hdev->num_alloc_vport; i++) {
5805                 vport = &hdev->vport[i];
5806                 vport->used_umv_num = 0;
5807         }
5808
5809         mutex_lock(&hdev->umv_mutex);
5810         hdev->share_umv_size = hdev->priv_umv_size +
5811                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5812         mutex_unlock(&hdev->umv_mutex);
5813 }
5814
5815 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5816 {
5817         struct hclge_dev *hdev = vport->back;
5818         bool is_full;
5819
5820         mutex_lock(&hdev->umv_mutex);
5821         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5822                    hdev->share_umv_size == 0);
5823         mutex_unlock(&hdev->umv_mutex);
5824
5825         return is_full;
5826 }
5827
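/* Account a unicast entry add/free against the vport's private quota first;
 * entries beyond the private quota are charged to the shared pool.
 */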
5828 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5829 {
5830         struct hclge_dev *hdev = vport->back;
5831
5832         mutex_lock(&hdev->umv_mutex);
5833         if (is_free) {
5834                 if (vport->used_umv_num > hdev->priv_umv_size)
5835                         hdev->share_umv_size++;
5836                 vport->used_umv_num--;
5837         } else {
5838                 if (vport->used_umv_num >= hdev->priv_umv_size)
5839                         hdev->share_umv_size--;
5840                 vport->used_umv_num++;
5841         }
5842         mutex_unlock(&hdev->umv_mutex);
5843 }
5844
5845 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5846                              const unsigned char *addr)
5847 {
5848         struct hclge_vport *vport = hclge_get_vport(handle);
5849
5850         return hclge_add_uc_addr_common(vport, addr);
5851 }
5852
5853 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5854                              const unsigned char *addr)
5855 {
5856         struct hclge_dev *hdev = vport->back;
5857         struct hclge_mac_vlan_tbl_entry_cmd req;
5858         struct hclge_desc desc;
5859         u16 egress_port = 0;
5860         int ret;
5861
5862         /* mac addr check */
5863         if (is_zero_ether_addr(addr) ||
5864             is_broadcast_ether_addr(addr) ||
5865             is_multicast_ether_addr(addr)) {
5866                 dev_err(&hdev->pdev->dev,
5867                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5868                          addr,
5869                          is_zero_ether_addr(addr),
5870                          is_broadcast_ether_addr(addr),
5871                          is_multicast_ether_addr(addr));
5872                 return -EINVAL;
5873         }
5874
5875         memset(&req, 0, sizeof(req));
5876         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5877
5878         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5879                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5880
5881         req.egress_port = cpu_to_le16(egress_port);
5882
5883         hclge_prepare_mac_addr(&req, addr);
5884
5885         /* Look up the mac address in the mac_vlan table and add it if
5886          * the entry does not exist. Duplicate unicast entries are not
5887          * allowed in the mac_vlan table.
5888          */
5889         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5890         if (ret == -ENOENT) {
5891                 if (!hclge_is_umv_space_full(vport)) {
5892                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5893                         if (!ret)
5894                                 hclge_update_umv_space(vport, false);
5895                         return ret;
5896                 }
5897
5898                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5899                         hdev->priv_umv_size);
5900
5901                 return -ENOSPC;
5902         }
5903
5904         /* check if we just hit the duplicate */
5905         if (!ret)
5906                 ret = -EINVAL;
5907
5908         dev_err(&hdev->pdev->dev,
5909                 "PF failed to add unicast entry(%pM) in the MAC table\n",
5910                 addr);
5911
5912         return ret;
5913 }
5914
5915 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5916                             const unsigned char *addr)
5917 {
5918         struct hclge_vport *vport = hclge_get_vport(handle);
5919
5920         return hclge_rm_uc_addr_common(vport, addr);
5921 }
5922
5923 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5924                             const unsigned char *addr)
5925 {
5926         struct hclge_dev *hdev = vport->back;
5927         struct hclge_mac_vlan_tbl_entry_cmd req;
5928         int ret;
5929
5930         /* mac addr check */
5931         if (is_zero_ether_addr(addr) ||
5932             is_broadcast_ether_addr(addr) ||
5933             is_multicast_ether_addr(addr)) {
5934                 dev_dbg(&hdev->pdev->dev,
5935                         "Remove mac err! invalid mac:%pM.\n",
5936                          addr);
5937                 return -EINVAL;
5938         }
5939
5940         memset(&req, 0, sizeof(req));
5941         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5942         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5943         hclge_prepare_mac_addr(&req, addr);
5944         ret = hclge_remove_mac_vlan_tbl(vport, &req);
5945         if (!ret)
5946                 hclge_update_umv_space(vport, true);
5947
5948         return ret;
5949 }
5950
5951 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5952                              const unsigned char *addr)
5953 {
5954         struct hclge_vport *vport = hclge_get_vport(handle);
5955
5956         return hclge_add_mc_addr_common(vport, addr);
5957 }
5958
5959 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5960                              const unsigned char *addr)
5961 {
5962         struct hclge_dev *hdev = vport->back;
5963         struct hclge_mac_vlan_tbl_entry_cmd req;
5964         struct hclge_desc desc[3];
5965         int status;
5966
5967         /* mac addr check */
5968         if (!is_multicast_ether_addr(addr)) {
5969                 dev_err(&hdev->pdev->dev,
5970                         "Add mc mac err! invalid mac:%pM.\n",
5971                          addr);
5972                 return -EINVAL;
5973         }
5974         memset(&req, 0, sizeof(req));
5975         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5976         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5977         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5978         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5979         hclge_prepare_mac_addr(&req, addr);
5980         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5981         if (!status) {
5982                 /* This mac addr exists, update the VFID for it */
5983                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5984                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5985         } else {
5986                 /* This mac addr does not exist, add a new entry for it */
5987                 memset(desc[0].data, 0, sizeof(desc[0].data));
5988                 memset(desc[1].data, 0, sizeof(desc[0].data));
5989                 memset(desc[2].data, 0, sizeof(desc[0].data));
5990                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5991                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5992         }
5993
5994         if (status == -ENOSPC)
5995                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5996
5997         return status;
5998 }
5999
6000 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6001                             const unsigned char *addr)
6002 {
6003         struct hclge_vport *vport = hclge_get_vport(handle);
6004
6005         return hclge_rm_mc_addr_common(vport, addr);
6006 }
6007
6008 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6009                             const unsigned char *addr)
6010 {
6011         struct hclge_dev *hdev = vport->back;
6012         struct hclge_mac_vlan_tbl_entry_cmd req;
6013         enum hclge_cmd_status status;
6014         struct hclge_desc desc[3];
6015
6016         /* mac addr check */
6017         if (!is_multicast_ether_addr(addr)) {
6018                 dev_dbg(&hdev->pdev->dev,
6019                         "Remove mc mac err! invalid mac:%pM.\n",
6020                          addr);
6021                 return -EINVAL;
6022         }
6023
6024         memset(&req, 0, sizeof(req));
6025         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6026         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6027         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6028         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6029         hclge_prepare_mac_addr(&req, addr);
6030         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6031         if (!status) {
6032                 /* This mac addr exists, remove this handle's VFID for it */
6033                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6034
6035                 if (hclge_is_all_function_id_zero(desc))
6036                         /* All the vfids are zero, so this entry needs to be deleted */
6037                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6038                 else
6039                         /* Not all the vfids are zero, so just update the vfid bitmap */
6040                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6041
6042         } else {
6043                 /* This mac address may be in the mta table, but it cannot
6044                  * be deleted here because an mta entry represents an address
6045                  * range rather than a specific address. The delete action for
6046                  * all entries will take effect in update_mta_status, called
6047                  * by hns3_nic_set_rx_mode.
6048                  */
6049                 status = 0;
6050         }
6051
6052         return status;
6053 }
6054
6055 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6056                                               u16 cmdq_resp, u8 resp_code)
6057 {
6058 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6059 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6060 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6061 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6062
6063         int return_status;
6064
6065         if (cmdq_resp) {
6066                 dev_err(&hdev->pdev->dev,
6067                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6068                         cmdq_resp);
6069                 return -EIO;
6070         }
6071
6072         switch (resp_code) {
6073         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6074         case HCLGE_ETHERTYPE_ALREADY_ADD:
6075                 return_status = 0;
6076                 break;
6077         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6078                 dev_err(&hdev->pdev->dev,
6079                         "add mac ethertype failed for manager table overflow.\n");
6080                 return_status = -EIO;
6081                 break;
6082         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6083                 dev_err(&hdev->pdev->dev,
6084                         "add mac ethertype failed for key conflict.\n");
6085                 return_status = -EIO;
6086                 break;
6087         default:
6088                 dev_err(&hdev->pdev->dev,
6089                         "add mac ethertype failed for undefined, code=%d.\n",
6090                         resp_code);
6091                 return_status = -EIO;
6092         }
6093
6094         return return_status;
6095 }
6096
6097 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6098                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6099 {
6100         struct hclge_desc desc;
6101         u8 resp_code;
6102         u16 retval;
6103         int ret;
6104
6105         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6106         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6107
6108         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6109         if (ret) {
6110                 dev_err(&hdev->pdev->dev,
6111                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6112                         ret);
6113                 return ret;
6114         }
6115
6116         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6117         retval = le16_to_cpu(desc.retval);
6118
6119         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6120 }
6121
6122 static int init_mgr_tbl(struct hclge_dev *hdev)
6123 {
6124         int ret;
6125         int i;
6126
6127         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6128                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6129                 if (ret) {
6130                         dev_err(&hdev->pdev->dev,
6131                                 "add mac ethertype failed, ret =%d.\n",
6132                                 ret);
6133                         return ret;
6134                 }
6135         }
6136
6137         return 0;
6138 }
6139
6140 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6141 {
6142         struct hclge_vport *vport = hclge_get_vport(handle);
6143         struct hclge_dev *hdev = vport->back;
6144
6145         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6146 }
6147
6148 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6149                               bool is_first)
6150 {
6151         const unsigned char *new_addr = (const unsigned char *)p;
6152         struct hclge_vport *vport = hclge_get_vport(handle);
6153         struct hclge_dev *hdev = vport->back;
6154         int ret;
6155
6156         /* mac addr check */
6157         if (is_zero_ether_addr(new_addr) ||
6158             is_broadcast_ether_addr(new_addr) ||
6159             is_multicast_ether_addr(new_addr)) {
6160                 dev_err(&hdev->pdev->dev,
6161                         "Change uc mac err! invalid mac:%p.\n",
6162                          new_addr);
6163                 return -EINVAL;
6164         }
6165
6166         if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6167                 dev_warn(&hdev->pdev->dev,
6168                          "remove old uc mac address fail.\n");
6169
6170         ret = hclge_add_uc_addr(handle, new_addr);
6171         if (ret) {
6172                 dev_err(&hdev->pdev->dev,
6173                         "add uc mac address fail, ret =%d.\n",
6174                         ret);
6175
6176                 if (!is_first &&
6177                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6178                         dev_err(&hdev->pdev->dev,
6179                                 "restore uc mac address fail.\n");
6180
6181                 return -EIO;
6182         }
6183
6184         ret = hclge_pause_addr_cfg(hdev, new_addr);
6185         if (ret) {
6186                 dev_err(&hdev->pdev->dev,
6187                         "configure mac pause address fail, ret =%d.\n",
6188                         ret);
6189                 return -EIO;
6190         }
6191
6192         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6193
6194         return 0;
6195 }
6196
6197 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6198                           int cmd)
6199 {
6200         struct hclge_vport *vport = hclge_get_vport(handle);
6201         struct hclge_dev *hdev = vport->back;
6202
6203         if (!hdev->hw.mac.phydev)
6204                 return -EOPNOTSUPP;
6205
6206         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6207 }
6208
6209 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6210                                       u8 fe_type, bool filter_en)
6211 {
6212         struct hclge_vlan_filter_ctrl_cmd *req;
6213         struct hclge_desc desc;
6214         int ret;
6215
6216         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6217
6218         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6219         req->vlan_type = vlan_type;
6220         req->vlan_fe = filter_en ? fe_type : 0;
6221
6222         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6223         if (ret)
6224                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6225                         ret);
6226
6227         return ret;
6228 }
6229
6230 #define HCLGE_FILTER_TYPE_VF            0
6231 #define HCLGE_FILTER_TYPE_PORT          1
6232 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6233 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6234 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6235 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6236 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6237 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6238                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6239 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6240                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6241
6242 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6243 {
6244         struct hclge_vport *vport = hclge_get_vport(handle);
6245         struct hclge_dev *hdev = vport->back;
6246
6247         if (hdev->pdev->revision >= 0x21) {
6248                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6249                                            HCLGE_FILTER_FE_EGRESS, enable);
6250                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6251                                            HCLGE_FILTER_FE_INGRESS, enable);
6252         } else {
6253                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6254                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6255         }
6256         if (enable)
6257                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6258         else
6259                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6260 }
6261
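/* Program the per-VF VLAN filter: the VF bitmap covers up to 256 functions
 * and is split across two descriptors of HCLGE_MAX_VF_BYTES (16) bytes each.
 */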
6262 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6263                                     bool is_kill, u16 vlan, u8 qos,
6264                                     __be16 proto)
6265 {
6266 #define HCLGE_MAX_VF_BYTES  16
6267         struct hclge_vlan_filter_vf_cfg_cmd *req0;
6268         struct hclge_vlan_filter_vf_cfg_cmd *req1;
6269         struct hclge_desc desc[2];
6270         u8 vf_byte_val;
6271         u8 vf_byte_off;
6272         int ret;
6273
6274         hclge_cmd_setup_basic_desc(&desc[0],
6275                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6276         hclge_cmd_setup_basic_desc(&desc[1],
6277                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6278
6279         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6280
6281         vf_byte_off = vfid / 8;
6282         vf_byte_val = 1 << (vfid % 8);
6283
6284         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6285         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6286
6287         req0->vlan_id  = cpu_to_le16(vlan);
6288         req0->vlan_cfg = is_kill;
6289
6290         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6291                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6292         else
6293                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6294
6295         ret = hclge_cmd_send(&hdev->hw, desc, 2);
6296         if (ret) {
6297                 dev_err(&hdev->pdev->dev,
6298                         "Send vf vlan command fail, ret =%d.\n",
6299                         ret);
6300                 return ret;
6301         }
6302
6303         if (!is_kill) {
6304 #define HCLGE_VF_VLAN_NO_ENTRY  2
6305                 if (!req0->resp_code || req0->resp_code == 1)
6306                         return 0;
6307
6308                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6309                         dev_warn(&hdev->pdev->dev,
6310                                  "vf vlan table is full, vf vlan filter is disabled\n");
6311                         return 0;
6312                 }
6313
6314                 dev_err(&hdev->pdev->dev,
6315                         "Add vf vlan filter fail, ret =%d.\n",
6316                         req0->resp_code);
6317         } else {
6318 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
6319                 if (!req0->resp_code)
6320                         return 0;
6321
6322                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6323                         dev_warn(&hdev->pdev->dev,
6324                                  "vlan %d filter is not in vf vlan table\n",
6325                                  vlan);
6326                         return 0;
6327                 }
6328
6329                 dev_err(&hdev->pdev->dev,
6330                         "Kill vf vlan filter fail, ret =%d.\n",
6331                         req0->resp_code);
6332         }
6333
6334         return -EIO;
6335 }
6336
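/* Program the port-level VLAN filter: VLAN IDs are grouped into blocks of
 * 160, vlan_offset selects the block and the bitmap byte/bit select the ID
 * within it.
 */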
6337 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6338                                       u16 vlan_id, bool is_kill)
6339 {
6340         struct hclge_vlan_filter_pf_cfg_cmd *req;
6341         struct hclge_desc desc;
6342         u8 vlan_offset_byte_val;
6343         u8 vlan_offset_byte;
6344         u8 vlan_offset_160;
6345         int ret;
6346
6347         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6348
6349         vlan_offset_160 = vlan_id / 160;
6350         vlan_offset_byte = (vlan_id % 160) / 8;
6351         vlan_offset_byte_val = 1 << (vlan_id % 8);
6352
6353         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6354         req->vlan_offset = vlan_offset_160;
6355         req->vlan_cfg = is_kill;
6356         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6357
6358         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6359         if (ret)
6360                 dev_err(&hdev->pdev->dev,
6361                         "port vlan command, send fail, ret =%d.\n", ret);
6362         return ret;
6363 }
6364
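/* Apply a VLAN filter change for one vport. The per-VF filter is updated
 * first; the port-level filter entry is only written when the first vport
 * joins the VLAN or the last vport leaves it, tracked via hdev->vlan_table.
 */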
6365 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6366                                     u16 vport_id, u16 vlan_id, u8 qos,
6367                                     bool is_kill)
6368 {
6369         u16 vport_idx, vport_num = 0;
6370         int ret;
6371
6372         if (is_kill && !vlan_id)
6373                 return 0;
6374
6375         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6376                                        0, proto);
6377         if (ret) {
6378                 dev_err(&hdev->pdev->dev,
6379                         "Set %d vport vlan filter config fail, ret =%d.\n",
6380                         vport_id, ret);
6381                 return ret;
6382         }
6383
6384         /* vlan 0 may be added twice when 8021q module is enabled */
6385         if (!is_kill && !vlan_id &&
6386             test_bit(vport_id, hdev->vlan_table[vlan_id]))
6387                 return 0;
6388
6389         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6390                 dev_err(&hdev->pdev->dev,
6391                         "Add port vlan failed, vport %d is already in vlan %d\n",
6392                         vport_id, vlan_id);
6393                 return -EINVAL;
6394         }
6395
6396         if (is_kill &&
6397             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6398                 dev_err(&hdev->pdev->dev,
6399                         "Delete port vlan failed, vport %d is not in vlan %d\n",
6400                         vport_id, vlan_id);
6401                 return -EINVAL;
6402         }
6403
6404         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6405                 vport_num++;
6406
6407         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6408                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6409                                                  is_kill);
6410
6411         return ret;
6412 }
6413
6414 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6415                           u16 vlan_id, bool is_kill)
6416 {
6417         struct hclge_vport *vport = hclge_get_vport(handle);
6418         struct hclge_dev *hdev = vport->back;
6419
6420         return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6421                                         0, is_kill);
6422 }
6423
6424 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6425                                     u16 vlan, u8 qos, __be16 proto)
6426 {
6427         struct hclge_vport *vport = hclge_get_vport(handle);
6428         struct hclge_dev *hdev = vport->back;
6429
6430         if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6431                 return -EINVAL;
6432         if (proto != htons(ETH_P_8021Q))
6433                 return -EPROTONOSUPPORT;
6434
6435         return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6436 }
6437
6438 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6439 {
6440         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6441         struct hclge_vport_vtag_tx_cfg_cmd *req;
6442         struct hclge_dev *hdev = vport->back;
6443         struct hclge_desc desc;
6444         int status;
6445
6446         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6447
6448         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6449         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6450         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6451         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6452                       vcfg->accept_tag1 ? 1 : 0);
6453         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6454                       vcfg->accept_untag1 ? 1 : 0);
6455         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6456                       vcfg->accept_tag2 ? 1 : 0);
6457         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6458                       vcfg->accept_untag2 ? 1 : 0);
6459         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6460                       vcfg->insert_tag1_en ? 1 : 0);
6461         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6462                       vcfg->insert_tag2_en ? 1 : 0);
6463         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6464
6465         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6466         req->vf_bitmap[req->vf_offset] =
6467                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6468
6469         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6470         if (status)
6471                 dev_err(&hdev->pdev->dev,
6472                         "Send port txvlan cfg command fail, ret =%d\n",
6473                         status);
6474
6475         return status;
6476 }
6477
6478 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6479 {
6480         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6481         struct hclge_vport_vtag_rx_cfg_cmd *req;
6482         struct hclge_dev *hdev = vport->back;
6483         struct hclge_desc desc;
6484         int status;
6485
6486         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6487
6488         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6489         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6490                       vcfg->strip_tag1_en ? 1 : 0);
6491         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6492                       vcfg->strip_tag2_en ? 1 : 0);
6493         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6494                       vcfg->vlan1_vlan_prionly ? 1 : 0);
6495         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6496                       vcfg->vlan2_vlan_prionly ? 1 : 0);
6497
6498         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6499         req->vf_bitmap[req->vf_offset] =
6500                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6501
6502         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6503         if (status)
6504                 dev_err(&hdev->pdev->dev,
6505                         "Send port rxvlan cfg command fail, ret =%d\n",
6506                         status);
6507
6508         return status;
6509 }
6510
6511 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6512 {
6513         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6514         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6515         struct hclge_desc desc;
6516         int status;
6517
6518         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6519         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6520         rx_req->ot_fst_vlan_type =
6521                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6522         rx_req->ot_sec_vlan_type =
6523                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6524         rx_req->in_fst_vlan_type =
6525                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6526         rx_req->in_sec_vlan_type =
6527                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6528
6529         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6530         if (status) {
6531                 dev_err(&hdev->pdev->dev,
6532                         "Send rxvlan protocol type command fail, ret =%d\n",
6533                         status);
6534                 return status;
6535         }
6536
6537         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6538
6539         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6540         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6541         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6542
6543         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6544         if (status)
6545                 dev_err(&hdev->pdev->dev,
6546                         "Send txvlan protocol type command fail, ret =%d\n",
6547                         status);
6548
6549         return status;
6550 }
6551
6552 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6553 {
6554 #define HCLGE_DEF_VLAN_TYPE             0x8100
6555
6556         struct hnae3_handle *handle = &hdev->vport[0].nic;
6557         struct hclge_vport *vport;
6558         int ret;
6559         int i;
6560
6561         if (hdev->pdev->revision >= 0x21) {
6562                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6563                                                  HCLGE_FILTER_FE_EGRESS, true);
6564                 if (ret)
6565                         return ret;
6566
6567                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6568                                                  HCLGE_FILTER_FE_INGRESS, true);
6569                 if (ret)
6570                         return ret;
6571         } else {
6572                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6573                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
6574                                                  true);
6575                 if (ret)
6576                         return ret;
6577         }
6578
6579         handle->netdev_flags |= HNAE3_VLAN_FLTR;
6580
6581         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6582         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6583         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6584         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6585         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6586         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6587
6588         ret = hclge_set_vlan_protocol_type(hdev);
6589         if (ret)
6590                 return ret;
6591
6592         for (i = 0; i < hdev->num_alloc_vport; i++) {
6593                 vport = &hdev->vport[i];
6594                 vport->txvlan_cfg.accept_tag1 = true;
6595                 vport->txvlan_cfg.accept_untag1 = true;
6596
6597                 /* accept_tag2 and accept_untag2 are not supported on
6598                  * pdev revision(0x20); newer revisions support them. On
6599                  * revision(0x20) the firmware does not return an error for
6600                  * these two fields when the driver sends the command.
6601                  * These two fields cannot be configured by the user.
6602                  */
6603                 vport->txvlan_cfg.accept_tag2 = true;
6604                 vport->txvlan_cfg.accept_untag2 = true;
6605
6606                 vport->txvlan_cfg.insert_tag1_en = false;
6607                 vport->txvlan_cfg.insert_tag2_en = false;
6608                 vport->txvlan_cfg.default_tag1 = 0;
6609                 vport->txvlan_cfg.default_tag2 = 0;
6610
6611                 ret = hclge_set_vlan_tx_offload_cfg(vport);
6612                 if (ret)
6613                         return ret;
6614
6615                 vport->rxvlan_cfg.strip_tag1_en = false;
6616                 vport->rxvlan_cfg.strip_tag2_en = true;
6617                 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6618                 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6619
6620                 ret = hclge_set_vlan_rx_offload_cfg(vport);
6621                 if (ret)
6622                         return ret;
6623         }
6624
6625         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6626 }
6627
6628 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6629 {
6630         struct hclge_vport *vport = hclge_get_vport(handle);
6631
6632         vport->rxvlan_cfg.strip_tag1_en = false;
6633         vport->rxvlan_cfg.strip_tag2_en = enable;
6634         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6635         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6636
6637         return hclge_set_vlan_rx_offload_cfg(vport);
6638 }
6639
6640 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6641 {
6642         struct hclge_config_max_frm_size_cmd *req;
6643         struct hclge_desc desc;
6644
6645         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6646
6647         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6648         req->max_frm_size = cpu_to_le16(new_mps);
6649         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6650
6651         return hclge_cmd_send(&hdev->hw, &desc, 1);
6652 }
6653
6654 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6655 {
6656         struct hclge_vport *vport = hclge_get_vport(handle);
6657
6658         return hclge_set_vport_mtu(vport, new_mtu);
6659 }
6660
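/* Convert the requested MTU into a max frame size (MTU + Ethernet header +
 * FCS + two VLAN tags) and validate it against the hardware limits before
 * reprogramming the MAC and reallocating packet buffers.
 */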
6661 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6662 {
6663         struct hclge_dev *hdev = vport->back;
6664         int i, max_frm_size, ret = 0;
6665
6666         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6667         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6668             max_frm_size > HCLGE_MAC_MAX_FRAME)
6669                 return -EINVAL;
6670
6671         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6672         mutex_lock(&hdev->vport_lock);
6673         /* VF's mps must fit within hdev->mps */
6674         if (vport->vport_id && max_frm_size > hdev->mps) {
6675                 mutex_unlock(&hdev->vport_lock);
6676                 return -EINVAL;
6677         } else if (vport->vport_id) {
6678                 vport->mps = max_frm_size;
6679                 mutex_unlock(&hdev->vport_lock);
6680                 return 0;
6681         }
6682
6683         /* PF's mps must be no less than any VF's mps */
6684         for (i = 1; i < hdev->num_alloc_vport; i++)
6685                 if (max_frm_size < hdev->vport[i].mps) {
6686                         mutex_unlock(&hdev->vport_lock);
6687                         return -EINVAL;
6688                 }
6689
6690         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6691
6692         ret = hclge_set_mac_mtu(hdev, max_frm_size);
6693         if (ret) {
6694                 dev_err(&hdev->pdev->dev,
6695                         "Change mtu fail, ret =%d\n", ret);
6696                 goto out;
6697         }
6698
6699         hdev->mps = max_frm_size;
6700         vport->mps = max_frm_size;
6701
6702         ret = hclge_buffer_alloc(hdev);
6703         if (ret)
6704                 dev_err(&hdev->pdev->dev,
6705                         "Allocate buffer fail, ret =%d\n", ret);
6706
6707 out:
6708         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6709         mutex_unlock(&hdev->vport_lock);
6710         return ret;
6711 }
6712
6713 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6714                                     bool enable)
6715 {
6716         struct hclge_reset_tqp_queue_cmd *req;
6717         struct hclge_desc desc;
6718         int ret;
6719
6720         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6721
6722         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6723         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6724         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6725
6726         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6727         if (ret) {
6728                 dev_err(&hdev->pdev->dev,
6729                         "Send tqp reset cmd error, status =%d\n", ret);
6730                 return ret;
6731         }
6732
6733         return 0;
6734 }
6735
6736 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6737 {
6738         struct hclge_reset_tqp_queue_cmd *req;
6739         struct hclge_desc desc;
6740         int ret;
6741
6742         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6743
6744         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6745         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6746
6747         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6748         if (ret) {
6749                 dev_err(&hdev->pdev->dev,
6750                         "Get reset status error, status =%d\n", ret);
6751                 return ret;
6752         }
6753
6754         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6755 }
6756
6757 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
6758 {
6759         struct hnae3_queue *queue;
6760         struct hclge_tqp *tqp;
6761
6762         queue = handle->kinfo.tqp[queue_id];
6763         tqp = container_of(queue, struct hclge_tqp, q);
6764
6765         return tqp->index;
6766 }
6767
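/* Reset a single TQP: disable the queue, assert the per-queue reset, poll the
 * ready_to_reset bit (up to HCLGE_TQP_RESET_TRY_TIMES tries, 20 ms apart),
 * then deassert the reset.
 */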
6768 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6769 {
6770         struct hclge_vport *vport = hclge_get_vport(handle);
6771         struct hclge_dev *hdev = vport->back;
6772         int reset_try_times = 0;
6773         int reset_status;
6774         u16 queue_gid;
6775         int ret = 0;
6776
6777         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6778
6779         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6780         if (ret) {
6781                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6782                 return ret;
6783         }
6784
6785         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6786         if (ret) {
6787                 dev_err(&hdev->pdev->dev,
6788                         "Send reset tqp cmd fail, ret = %d\n", ret);
6789                 return ret;
6790         }
6791
6792         reset_try_times = 0;
6793         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6794                 /* Wait for tqp hw reset */
6795                 msleep(20);
6796                 reset_status = hclge_get_reset_status(hdev, queue_gid);
6797                 if (reset_status)
6798                         break;
6799         }
6800
6801         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6802                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6803                 return ret;
6804         }
6805
6806         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6807         if (ret)
6808                 dev_err(&hdev->pdev->dev,
6809                         "Deassert the soft reset fail, ret = %d\n", ret);
6810
6811         return ret;
6812 }
6813
6814 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6815 {
6816         struct hclge_dev *hdev = vport->back;
6817         int reset_try_times = 0;
6818         int reset_status;
6819         u16 queue_gid;
6820         int ret;
6821
6822         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6823
6824         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6825         if (ret) {
6826                 dev_warn(&hdev->pdev->dev,
6827                          "Send reset tqp cmd fail, ret = %d\n", ret);
6828                 return;
6829         }
6830
6831         reset_try_times = 0;
6832         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6833                 /* Wait for tqp hw reset */
6834                 msleep(20);
6835                 reset_status = hclge_get_reset_status(hdev, queue_gid);
6836                 if (reset_status)
6837                         break;
6838         }
6839
6840         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6841                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6842                 return;
6843         }
6844
6845         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6846         if (ret)
6847                 dev_warn(&hdev->pdev->dev,
6848                          "Deassert the soft reset fail, ret = %d\n", ret);
6849 }
6850
6851 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6852 {
6853         struct hclge_vport *vport = hclge_get_vport(handle);
6854         struct hclge_dev *hdev = vport->back;
6855
6856         return hdev->fw_version;
6857 }
6858
6859 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6860 {
6861         struct phy_device *phydev = hdev->hw.mac.phydev;
6862
6863         if (!phydev)
6864                 return;
6865
6866         phy_set_asym_pause(phydev, rx_en, tx_en);
6867 }
6868
6869 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6870 {
6871         int ret;
6872
6873         if (rx_en && tx_en)
6874                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
6875         else if (rx_en && !tx_en)
6876                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6877         else if (!rx_en && tx_en)
6878                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6879         else
6880                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
6881
6882         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6883                 return 0;
6884
6885         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6886         if (ret) {
6887                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6888                         ret);
6889                 return ret;
6890         }
6891
6892         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6893
6894         return 0;
6895 }
6896
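/* Resolve flow control from PHY autoneg results: combine the local and link
 * partner pause advertisements via mii_resolve_flowctrl_fdx() and disable
 * pause entirely at half duplex.
 */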
6897 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6898 {
6899         struct phy_device *phydev = hdev->hw.mac.phydev;
6900         u16 remote_advertising = 0;
6901         u16 local_advertising = 0;
6902         u32 rx_pause, tx_pause;
6903         u8 flowctl;
6904
6905         if (!phydev->link || !phydev->autoneg)
6906                 return 0;
6907
6908         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6909
6910         if (phydev->pause)
6911                 remote_advertising = LPA_PAUSE_CAP;
6912
6913         if (phydev->asym_pause)
6914                 remote_advertising |= LPA_PAUSE_ASYM;
6915
6916         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6917                                            remote_advertising);
6918         tx_pause = flowctl & FLOW_CTRL_TX;
6919         rx_pause = flowctl & FLOW_CTRL_RX;
6920
6921         if (phydev->duplex == HCLGE_MAC_HALF) {
6922                 tx_pause = 0;
6923                 rx_pause = 0;
6924         }
6925
6926         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6927 }
6928
6929 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6930                                  u32 *rx_en, u32 *tx_en)
6931 {
6932         struct hclge_vport *vport = hclge_get_vport(handle);
6933         struct hclge_dev *hdev = vport->back;
6934
6935         *auto_neg = hclge_get_autoneg(handle);
6936
6937         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6938                 *rx_en = 0;
6939                 *tx_en = 0;
6940                 return;
6941         }
6942
6943         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6944                 *rx_en = 1;
6945                 *tx_en = 0;
6946         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6947                 *tx_en = 1;
6948                 *rx_en = 0;
6949         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6950                 *rx_en = 1;
6951                 *tx_en = 1;
6952         } else {
6953                 *rx_en = 0;
6954                 *tx_en = 0;
6955         }
6956 }
6957
6958 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6959                                 u32 rx_en, u32 tx_en)
6960 {
6961         struct hclge_vport *vport = hclge_get_vport(handle);
6962         struct hclge_dev *hdev = vport->back;
6963         struct phy_device *phydev = hdev->hw.mac.phydev;
6964         u32 fc_autoneg;
6965
6966         fc_autoneg = hclge_get_autoneg(handle);
6967         if (auto_neg != fc_autoneg) {
6968                 dev_info(&hdev->pdev->dev,
6969                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6970                 return -EOPNOTSUPP;
6971         }
6972
6973         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6974                 dev_info(&hdev->pdev->dev,
6975                          "Priority flow control enabled. Cannot set link flow control.\n");
6976                 return -EOPNOTSUPP;
6977         }
6978
6979         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6980
6981         if (!fc_autoneg)
6982                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6983
6984         /* Flow control negotiation is only supported for netdevs with a
6985          * phy attached for now.
6986          */
6987         if (!phydev)
6988                 return -EOPNOTSUPP;
6989
6990         return phy_start_aneg(phydev);
6991 }
6992
6993 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6994                                           u8 *auto_neg, u32 *speed, u8 *duplex)
6995 {
6996         struct hclge_vport *vport = hclge_get_vport(handle);
6997         struct hclge_dev *hdev = vport->back;
6998
6999         if (speed)
7000                 *speed = hdev->hw.mac.speed;
7001         if (duplex)
7002                 *duplex = hdev->hw.mac.duplex;
7003         if (auto_neg)
7004                 *auto_neg = hdev->hw.mac.autoneg;
7005 }
7006
7007 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7008 {
7009         struct hclge_vport *vport = hclge_get_vport(handle);
7010         struct hclge_dev *hdev = vport->back;
7011
7012         if (media_type)
7013                 *media_type = hdev->hw.mac.media_type;
7014 }
7015
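/* Report the MDI/MDI-X control and status of the attached PHY: switch to
 * the MDI-X register page, read the control and status registers, then
 * switch back to the copper page before decoding the result.
 */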
7016 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7017                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7018 {
7019         struct hclge_vport *vport = hclge_get_vport(handle);
7020         struct hclge_dev *hdev = vport->back;
7021         struct phy_device *phydev = hdev->hw.mac.phydev;
7022         int mdix_ctrl, mdix, retval, is_resolved;
7023
7024         if (!phydev) {
7025                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7026                 *tp_mdix = ETH_TP_MDI_INVALID;
7027                 return;
7028         }
7029
7030         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7031
7032         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7033         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7034                                     HCLGE_PHY_MDIX_CTRL_S);
7035
7036         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7037         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7038         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7039
7040         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7041
7042         switch (mdix_ctrl) {
7043         case 0x0:
7044                 *tp_mdix_ctrl = ETH_TP_MDI;
7045                 break;
7046         case 0x1:
7047                 *tp_mdix_ctrl = ETH_TP_MDI_X;
7048                 break;
7049         case 0x3:
7050                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7051                 break;
7052         default:
7053                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7054                 break;
7055         }
7056
7057         if (!is_resolved)
7058                 *tp_mdix = ETH_TP_MDI_INVALID;
7059         else if (mdix)
7060                 *tp_mdix = ETH_TP_MDI_X;
7061         else
7062                 *tp_mdix = ETH_TP_MDI;
7063 }
7064
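/* Bring up the NIC/UNIC/RoCE client instance on the PF vport and on every
 * VMDq vport. The RoCE instance is only initialized once a NIC client is
 * registered and the device reports RoCE support.
 */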
7065 static int hclge_init_client_instance(struct hnae3_client *client,
7066                                       struct hnae3_ae_dev *ae_dev)
7067 {
7068         struct hclge_dev *hdev = ae_dev->priv;
7069         struct hclge_vport *vport;
7070         int i, ret;
7071
7072         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7073                 vport = &hdev->vport[i];
7074
7075                 switch (client->type) {
7076                 case HNAE3_CLIENT_KNIC:
7077
7078                         hdev->nic_client = client;
7079                         vport->nic.client = client;
7080                         ret = client->ops->init_instance(&vport->nic);
7081                         if (ret)
7082                                 goto clear_nic;
7083
7084                         hnae3_set_client_init_flag(client, ae_dev, 1);
7085
7086                         if (hdev->roce_client &&
7087                             hnae3_dev_roce_supported(hdev)) {
7088                                 struct hnae3_client *rc = hdev->roce_client;
7089
7090                                 ret = hclge_init_roce_base_info(vport);
7091                                 if (ret)
7092                                         goto clear_roce;
7093
7094                                 ret = rc->ops->init_instance(&vport->roce);
7095                                 if (ret)
7096                                         goto clear_roce;
7097
7098                                 hnae3_set_client_init_flag(hdev->roce_client,
7099                                                            ae_dev, 1);
7100                         }
7101
7102                         break;
7103                 case HNAE3_CLIENT_UNIC:
7104                         hdev->nic_client = client;
7105                         vport->nic.client = client;
7106
7107                         ret = client->ops->init_instance(&vport->nic);
7108                         if (ret)
7109                                 goto clear_nic;
7110
7111                         hnae3_set_client_init_flag(client, ae_dev, 1);
7112
7113                         break;
7114                 case HNAE3_CLIENT_ROCE:
7115                         if (hnae3_dev_roce_supported(hdev)) {
7116                                 hdev->roce_client = client;
7117                                 vport->roce.client = client;
7118                         }
7119
7120                         if (hdev->roce_client && hdev->nic_client) {
7121                                 ret = hclge_init_roce_base_info(vport);
7122                                 if (ret)
7123                                         goto clear_roce;
7124
7125                                 ret = client->ops->init_instance(&vport->roce);
7126                                 if (ret)
7127                                         goto clear_roce;
7128
7129                                 hnae3_set_client_init_flag(client, ae_dev, 1);
7130                         }
7131
7132                         break;
7133                 default:
7134                         return -EINVAL;
7135                 }
7136         }
7137
7138         return 0;
7139
7140 clear_nic:
7141         hdev->nic_client = NULL;
7142         vport->nic.client = NULL;
7143         return ret;
7144 clear_roce:
7145         hdev->roce_client = NULL;
7146         vport->roce.client = NULL;
7147         return ret;
7148 }
7149
7150 static void hclge_uninit_client_instance(struct hnae3_client *client,
7151                                          struct hnae3_ae_dev *ae_dev)
7152 {
7153         struct hclge_dev *hdev = ae_dev->priv;
7154         struct hclge_vport *vport;
7155         int i;
7156
7157         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7158                 vport = &hdev->vport[i];
7159                 if (hdev->roce_client) {
7160                         hdev->roce_client->ops->uninit_instance(&vport->roce,
7161                                                                 0);
7162                         hdev->roce_client = NULL;
7163                         vport->roce.client = NULL;
7164                 }
7165                 if (client->type == HNAE3_CLIENT_ROCE)
7166                         return;
7167                 if (hdev->nic_client && client->ops->uninit_instance) {
7168                         client->ops->uninit_instance(&vport->nic, 0);
7169                         hdev->nic_client = NULL;
7170                         vport->nic.client = NULL;
7171                 }
7172         }
7173 }
7174
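/* Enable the PCI device, set a 64-bit DMA mask (falling back to 32-bit),
 * request the BAR regions and map BAR2 as the register I/O base.
 */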
7175 static int hclge_pci_init(struct hclge_dev *hdev)
7176 {
7177         struct pci_dev *pdev = hdev->pdev;
7178         struct hclge_hw *hw;
7179         int ret;
7180
7181         ret = pci_enable_device(pdev);
7182         if (ret) {
7183                 dev_err(&pdev->dev, "failed to enable PCI device\n");
7184                 return ret;
7185         }
7186
7187         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7188         if (ret) {
7189                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7190                 if (ret) {
7191                         dev_err(&pdev->dev,
7192                                 "can't set consistent PCI DMA\n");
7193                         goto err_disable_device;
7194                 }
7195                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7196         }
7197
7198         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7199         if (ret) {
7200                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7201                 goto err_disable_device;
7202         }
7203
7204         pci_set_master(pdev);
7205         hw = &hdev->hw;
7206         hw->io_base = pcim_iomap(pdev, 2, 0);
7207         if (!hw->io_base) {
7208                 dev_err(&pdev->dev, "Can't map configuration register space\n");
7209                 ret = -ENOMEM;
7210                 goto err_clr_master;
7211         }
7212
7213         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7214
7215         return 0;
7216 err_clr_master:
7217         pci_clear_master(pdev);
7218         pci_release_regions(pdev);
7219 err_disable_device:
7220         pci_disable_device(pdev);
7221
7222         return ret;
7223 }
7224
7225 static void hclge_pci_uninit(struct hclge_dev *hdev)
7226 {
7227         struct pci_dev *pdev = hdev->pdev;
7228
7229         pcim_iounmap(pdev, hdev->hw.io_base);
7230         pci_free_irq_vectors(pdev);
7231         pci_clear_master(pdev);
7232         pci_release_mem_regions(pdev);
7233         pci_disable_device(pdev);
7234 }
7235
7236 static void hclge_state_init(struct hclge_dev *hdev)
7237 {
7238         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7239         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7240         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7241         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7242         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7243         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7244 }
7245
7246 static void hclge_state_uninit(struct hclge_dev *hdev)
7247 {
7248         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7249
7250         if (hdev->service_timer.function)
7251                 del_timer_sync(&hdev->service_timer);
7252         if (hdev->reset_timer.function)
7253                 del_timer_sync(&hdev->reset_timer);
7254         if (hdev->service_task.func)
7255                 cancel_work_sync(&hdev->service_task);
7256         if (hdev->rst_service_task.func)
7257                 cancel_work_sync(&hdev->rst_service_task);
7258         if (hdev->mbx_service_task.func)
7259                 cancel_work_sync(&hdev->mbx_service_task);
7260 }
7261
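/* Prepare for a PCI function-level reset: request an FLR reset event and
 * poll for the FLR_DOWN flag, waiting up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds) in total.
 */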
7262 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7263 {
7264 #define HCLGE_FLR_WAIT_MS       100
7265 #define HCLGE_FLR_WAIT_CNT      50
7266         struct hclge_dev *hdev = ae_dev->priv;
7267         int cnt = 0;
7268
7269         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7270         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7271         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7272         hclge_reset_event(hdev->pdev, NULL);
7273
7274         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7275                cnt++ < HCLGE_FLR_WAIT_CNT)
7276                 msleep(HCLGE_FLR_WAIT_MS);
7277
7278         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7279                 dev_err(&hdev->pdev->dev,
7280                         "flr wait down timeout: %d\n", cnt);
7281 }
7282
7283 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7284 {
7285         struct hclge_dev *hdev = ae_dev->priv;
7286
7287         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7288 }
7289
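/* Top-level PF initialization: set up PCI and the command queue, query
 * capabilities, configure MSI/MSI-X and the misc vector, allocate TQPs
 * and vports, then initialize MAC, VLAN, TM, RSS, the manager and flow
 * director tables, and finally the service timers and work items.
 */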
7290 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7291 {
7292         struct pci_dev *pdev = ae_dev->pdev;
7293         struct hclge_dev *hdev;
7294         int ret;
7295
7296         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7297         if (!hdev) {
7298                 ret = -ENOMEM;
7299                 goto out;
7300         }
7301
7302         hdev->pdev = pdev;
7303         hdev->ae_dev = ae_dev;
7304         hdev->reset_type = HNAE3_NONE_RESET;
7305         hdev->reset_level = HNAE3_FUNC_RESET;
7306         ae_dev->priv = hdev;
7307         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7308
7309         mutex_init(&hdev->vport_lock);
7310
7311         ret = hclge_pci_init(hdev);
7312         if (ret) {
7313                 dev_err(&pdev->dev, "PCI init failed\n");
7314                 goto out;
7315         }
7316
7317         /* Initialize the firmware command queue */
7318         ret = hclge_cmd_queue_init(hdev);
7319         if (ret) {
7320                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7321                 goto err_pci_uninit;
7322         }
7323
7324         /* Initialize the firmware command interface */
7325         ret = hclge_cmd_init(hdev);
7326         if (ret)
7327                 goto err_cmd_uninit;
7328
7329         ret = hclge_get_cap(hdev);
7330         if (ret) {
7331                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7332                         ret);
7333                 goto err_cmd_uninit;
7334         }
7335
7336         ret = hclge_configure(hdev);
7337         if (ret) {
7338                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7339                 goto err_cmd_uninit;
7340         }
7341
7342         ret = hclge_init_msi(hdev);
7343         if (ret) {
7344                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7345                 goto err_cmd_uninit;
7346         }
7347
7348         ret = hclge_misc_irq_init(hdev);
7349         if (ret) {
7350                 dev_err(&pdev->dev,
7351                         "Misc IRQ(vector0) init error, ret = %d.\n",
7352                         ret);
7353                 goto err_msi_uninit;
7354         }
7355
7356         ret = hclge_alloc_tqps(hdev);
7357         if (ret) {
7358                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7359                 goto err_msi_irq_uninit;
7360         }
7361
7362         ret = hclge_alloc_vport(hdev);
7363         if (ret) {
7364                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7365                 goto err_msi_irq_uninit;
7366         }
7367
7368         ret = hclge_map_tqp(hdev);
7369         if (ret) {
7370                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7371                 goto err_msi_irq_uninit;
7372         }
7373
7374         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7375                 ret = hclge_mac_mdio_config(hdev);
7376                 if (ret) {
7377                         dev_err(&hdev->pdev->dev,
7378                                 "mdio config fail ret=%d\n", ret);
7379                         goto err_msi_irq_uninit;
7380                 }
7381         }
7382
7383         ret = hclge_init_umv_space(hdev);
7384         if (ret) {
7385                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7386                 goto err_mdiobus_unreg;
7387         }
7388
7389         ret = hclge_mac_init(hdev);
7390         if (ret) {
7391                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7392                 goto err_mdiobus_unreg;
7393         }
7394
7395         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7396         if (ret) {
7397                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7398                 goto err_mdiobus_unreg;
7399         }
7400
7401         ret = hclge_config_gro(hdev, true);
7402         if (ret)
7403                 goto err_mdiobus_unreg;
7404
7405         ret = hclge_init_vlan_config(hdev);
7406         if (ret) {
7407                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7408                 goto err_mdiobus_unreg;
7409         }
7410
7411         ret = hclge_tm_schd_init(hdev);
7412         if (ret) {
7413                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7414                 goto err_mdiobus_unreg;
7415         }
7416
7417         hclge_rss_init_cfg(hdev);
7418         ret = hclge_rss_init_hw(hdev);
7419         if (ret) {
7420                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7421                 goto err_mdiobus_unreg;
7422         }
7423
7424         ret = init_mgr_tbl(hdev);
7425         if (ret) {
7426                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7427                 goto err_mdiobus_unreg;
7428         }
7429
7430         ret = hclge_init_fd_config(hdev);
7431         if (ret) {
7432                 dev_err(&pdev->dev,
7433                         "fd table init fail, ret=%d\n", ret);
7434                 goto err_mdiobus_unreg;
7435         }
7436
7437         ret = hclge_hw_error_set_state(hdev, true);
7438         if (ret) {
7439                 dev_err(&pdev->dev,
7440                         "fail(%d) to enable hw error interrupts\n", ret);
7441                 goto err_mdiobus_unreg;
7442         }
7443
7444         hclge_dcb_ops_set(hdev);
7445
7446         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7447         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7448         INIT_WORK(&hdev->service_task, hclge_service_task);
7449         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7450         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7451
7452         hclge_clear_all_event_cause(hdev);
7453
7454         /* Enable MISC vector(vector0) */
7455         hclge_enable_vector(&hdev->misc_vector, true);
7456
7457         hclge_state_init(hdev);
7458         hdev->last_reset_time = jiffies;
7459
7460         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7461         return 0;
7462
7463 err_mdiobus_unreg:
7464         if (hdev->hw.mac.phydev)
7465                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
7466 err_msi_irq_uninit:
7467         hclge_misc_irq_uninit(hdev);
7468 err_msi_uninit:
7469         pci_free_irq_vectors(pdev);
7470 err_cmd_uninit:
7471         hclge_destroy_cmd_queue(&hdev->hw);
7472 err_pci_uninit:
7473         pcim_iounmap(pdev, hdev->hw.io_base);
7474         pci_clear_master(pdev);
7475         pci_release_regions(pdev);
7476         pci_disable_device(pdev);
7477 out:
7478         return ret;
7479 }
7480
7481 static void hclge_stats_clear(struct hclge_dev *hdev)
7482 {
7483         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7484 }
7485
7486 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7487 {
7488         struct hclge_vport *vport = hdev->vport;
7489         int i;
7490
7491         for (i = 0; i < hdev->num_alloc_vport; i++) {
7492                 hclge_vport_start(vport);
7493                 vport++;
7494         }
7495 }
7496
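/* Re-initialize the hardware after a reset: the command queue, TQP
 * mapping, MAC, VLAN, TM, RSS and flow director configuration are
 * rebuilt, while the software structures allocated at probe time are
 * kept.
 */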
7497 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7498 {
7499         struct hclge_dev *hdev = ae_dev->priv;
7500         struct pci_dev *pdev = ae_dev->pdev;
7501         int ret;
7502
7503         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7504
7505         hclge_stats_clear(hdev);
7506         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7507
7508         ret = hclge_cmd_init(hdev);
7509         if (ret) {
7510                 dev_err(&pdev->dev, "Cmd queue init failed\n");
7511                 return ret;
7512         }
7513
7514         ret = hclge_map_tqp(hdev);
7515         if (ret) {
7516                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7517                 return ret;
7518         }
7519
7520         hclge_reset_umv_space(hdev);
7521
7522         ret = hclge_mac_init(hdev);
7523         if (ret) {
7524                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7525                 return ret;
7526         }
7527
7528         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7529         if (ret) {
7530                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7531                 return ret;
7532         }
7533
7534         ret = hclge_config_gro(hdev, true);
7535         if (ret)
7536                 return ret;
7537
7538         ret = hclge_init_vlan_config(hdev);
7539         if (ret) {
7540                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7541                 return ret;
7542         }
7543
7544         ret = hclge_tm_init_hw(hdev, true);
7545         if (ret) {
7546                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7547                 return ret;
7548         }
7549
7550         ret = hclge_rss_init_hw(hdev);
7551         if (ret) {
7552                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7553                 return ret;
7554         }
7555
7556         ret = hclge_init_fd_config(hdev);
7557         if (ret) {
7558                 dev_err(&pdev->dev,
7559                         "fd table init fail, ret=%d\n", ret);
7560                 return ret;
7561         }
7562
7563         /* Re-enable the hw error interrupts because
7564          * the interrupts get disabled on core/global reset.
7565          */
7566         ret = hclge_hw_error_set_state(hdev, true);
7567         if (ret) {
7568                 dev_err(&pdev->dev,
7569                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
7570                 return ret;
7571         }
7572
7573         hclge_reset_vport_state(hdev);
7574
7575         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7576                  HCLGE_DRIVER_NAME);
7577
7578         return 0;
7579 }
7580
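/* Tear down the PF in roughly the reverse order of hclge_init_ae_dev():
 * stop timers and work items, unregister the MDIO bus, release UMV space,
 * mask the misc vector, disable hw error interrupts, destroy the command
 * queue and undo the PCI setup.
 */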
7581 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7582 {
7583         struct hclge_dev *hdev = ae_dev->priv;
7584         struct hclge_mac *mac = &hdev->hw.mac;
7585
7586         hclge_state_uninit(hdev);
7587
7588         if (mac->phydev)
7589                 mdiobus_unregister(mac->mdio_bus);
7590
7591         hclge_uninit_umv_space(hdev);
7592
7593         /* Disable MISC vector(vector0) */
7594         hclge_enable_vector(&hdev->misc_vector, false);
7595         synchronize_irq(hdev->misc_vector.vector_irq);
7596
7597         hclge_hw_error_set_state(hdev, false);
7598         hclge_destroy_cmd_queue(&hdev->hw);
7599         hclge_misc_irq_uninit(hdev);
7600         hclge_pci_uninit(hdev);
7601         mutex_destroy(&hdev->vport_lock);
7602         ae_dev->priv = NULL;
7603 }
7604
7605 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7606 {
7607         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7608         struct hclge_vport *vport = hclge_get_vport(handle);
7609         struct hclge_dev *hdev = vport->back;
7610
7611         return min_t(u32, hdev->rss_size_max,
7612                      vport->alloc_tqps / kinfo->num_tc);
7613 }
7614
7615 static void hclge_get_channels(struct hnae3_handle *handle,
7616                                struct ethtool_channels *ch)
7617 {
7618         ch->max_combined = hclge_get_max_channels(handle);
7619         ch->other_count = 1;
7620         ch->max_other = 1;
7621         ch->combined_count = handle->kinfo.rss_size;
7622 }
7623
7624 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7625                                         u16 *alloc_tqps, u16 *max_rss_size)
7626 {
7627         struct hclge_vport *vport = hclge_get_vport(handle);
7628         struct hclge_dev *hdev = vport->back;
7629
7630         *alloc_tqps = vport->alloc_tqps;
7631         *max_rss_size = hdev->rss_size_max;
7632 }
7633
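/* Change the number of queue pairs: record the requested TQP count, remap
 * the vport's TQPs, rebuild the RSS TC mode for the new rss_size and,
 * unless the user has configured the RSS indirection table, refill it
 * with a default round-robin mapping.
 */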
7634 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
7635                               bool rxfh_configured)
7636 {
7637         struct hclge_vport *vport = hclge_get_vport(handle);
7638         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7639         struct hclge_dev *hdev = vport->back;
7640         int cur_rss_size = kinfo->rss_size;
7641         int cur_tqps = kinfo->num_tqps;
7642         u16 tc_offset[HCLGE_MAX_TC_NUM];
7643         u16 tc_valid[HCLGE_MAX_TC_NUM];
7644         u16 tc_size[HCLGE_MAX_TC_NUM];
7645         u16 roundup_size;
7646         u32 *rss_indir;
7647         int ret, i;
7648
7649         kinfo->req_rss_size = new_tqps_num;
7650
7651         ret = hclge_tm_vport_map_update(hdev);
7652         if (ret) {
7653                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
7654                 return ret;
7655         }
7656
7657         roundup_size = roundup_pow_of_two(kinfo->rss_size);
7658         roundup_size = ilog2(roundup_size);
7659         /* Set the RSS TC mode according to the new RSS size */
7660         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7661                 tc_valid[i] = 0;
7662
7663                 if (!(hdev->hw_tc_map & BIT(i)))
7664                         continue;
7665
7666                 tc_valid[i] = 1;
7667                 tc_size[i] = roundup_size;
7668                 tc_offset[i] = kinfo->rss_size * i;
7669         }
7670         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7671         if (ret)
7672                 return ret;
7673
7674         /* RSS indirection table has been configured by user */
7675         if (rxfh_configured)
7676                 goto out;
7677
7678         /* Reinitialize the RSS indirection table according to the new RSS size */
7679         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7680         if (!rss_indir)
7681                 return -ENOMEM;
7682
7683         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7684                 rss_indir[i] = i % kinfo->rss_size;
7685
7686         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7687         if (ret)
7688                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7689                         ret);
7690
7691         kfree(rss_indir);
7692
7693 out:
7694         if (!ret)
7695                 dev_info(&hdev->pdev->dev,
7696                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
7697                          cur_rss_size, kinfo->rss_size,
7698                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
7699
7700         return ret;
7701 }
7702
7703 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7704                               u32 *regs_num_64_bit)
7705 {
7706         struct hclge_desc desc;
7707         u32 total_num;
7708         int ret;
7709
7710         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7711         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7712         if (ret) {
7713                 dev_err(&hdev->pdev->dev,
7714                         "Query register number cmd failed, ret = %d.\n", ret);
7715                 return ret;
7716         }
7717
7718         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
7719         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
7720
7721         total_num = *regs_num_32_bit + *regs_num_64_bit;
7722         if (!total_num)
7723                 return -EINVAL;
7724
7725         return 0;
7726 }
7727
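/* Read the firmware-held 32-bit registers. Each command descriptor
 * returns HCLGE_32_BIT_REG_RTN_DATANUM values, except the first one,
 * whose head is used by the command itself and therefore carries two
 * values fewer (hence the "regs_num + 2" when sizing cmd_num).
 */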
7728 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7729                                  void *data)
7730 {
7731 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7732
7733         struct hclge_desc *desc;
7734         u32 *reg_val = data;
7735         __le32 *desc_data;
7736         int cmd_num;
7737         int i, k, n;
7738         int ret;
7739
7740         if (regs_num == 0)
7741                 return 0;
7742
7743         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7744         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7745         if (!desc)
7746                 return -ENOMEM;
7747
7748         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7749         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7750         if (ret) {
7751                 dev_err(&hdev->pdev->dev,
7752                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
7753                 kfree(desc);
7754                 return ret;
7755         }
7756
7757         for (i = 0; i < cmd_num; i++) {
7758                 if (i == 0) {
7759                         desc_data = (__le32 *)(&desc[i].data[0]);
7760                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7761                 } else {
7762                         desc_data = (__le32 *)(&desc[i]);
7763                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
7764                 }
7765                 for (k = 0; k < n; k++) {
7766                         *reg_val++ = le32_to_cpu(*desc_data++);
7767
7768                         regs_num--;
7769                         if (!regs_num)
7770                                 break;
7771                 }
7772         }
7773
7774         kfree(desc);
7775         return 0;
7776 }
7777
7778 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7779                                  void *data)
7780 {
7781 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
7782
7783         struct hclge_desc *desc;
7784         u64 *reg_val = data;
7785         __le64 *desc_data;
7786         int cmd_num;
7787         int i, k, n;
7788         int ret;
7789
7790         if (regs_num == 0)
7791                 return 0;
7792
7793         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
7794         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7795         if (!desc)
7796                 return -ENOMEM;
7797
7798         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
7799         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7800         if (ret) {
7801                 dev_err(&hdev->pdev->dev,
7802                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
7803                 kfree(desc);
7804                 return ret;
7805         }
7806
7807         for (i = 0; i < cmd_num; i++) {
7808                 if (i == 0) {
7809                         desc_data = (__le64 *)(&desc[i].data[0]);
7810                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
7811                 } else {
7812                         desc_data = (__le64 *)(&desc[i]);
7813                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
7814                 }
7815                 for (k = 0; k < n; k++) {
7816                         *reg_val++ = le64_to_cpu(*desc_data++);
7817
7818                         regs_num--;
7819                         if (!regs_num)
7820                                 break;
7821                 }
7822         }
7823
7824         kfree(desc);
7825         return 0;
7826 }
7827
7828 #define MAX_SEPARATE_NUM        4
7829 #define SEPARATOR_VALUE         0xFFFFFFFF
7830 #define REG_NUM_PER_LINE        4
7831 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
7832
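/* Length of the ethtool register dump: one separator-padded line per
 * group of directly readable registers (cmdq, common, per-ring, per-TQP
 * interrupt), plus the 32-bit and 64-bit register counts reported by
 * firmware.
 */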
7833 static int hclge_get_regs_len(struct hnae3_handle *handle)
7834 {
7835         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
7836         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7837         struct hclge_vport *vport = hclge_get_vport(handle);
7838         struct hclge_dev *hdev = vport->back;
7839         u32 regs_num_32_bit, regs_num_64_bit;
7840         int ret;
7841
7842         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7843         if (ret) {
7844                 dev_err(&hdev->pdev->dev,
7845                         "Get register number failed, ret = %d.\n", ret);
7846                 return -EOPNOTSUPP;
7847         }
7848
7849         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
7850         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
7851         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
7852         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
7853
7854         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
7855                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
7856                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
7857 }
7858
7859 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
7860                            void *data)
7861 {
7862         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7863         struct hclge_vport *vport = hclge_get_vport(handle);
7864         struct hclge_dev *hdev = vport->back;
7865         u32 regs_num_32_bit, regs_num_64_bit;
7866         int i, j, reg_um, separator_num;
7867         u32 *reg = data;
7868         int ret;
7869
7870         *version = hdev->fw_version;
7871
7872         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7873         if (ret) {
7874                 dev_err(&hdev->pdev->dev,
7875                         "Get register number failed, ret = %d.\n", ret);
7876                 return;
7877         }
7878
7879         /* fetching per-PF register values from PF PCIe register space */
7880         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
7881         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7882         for (i = 0; i < reg_um; i++)
7883                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
7884         for (i = 0; i < separator_num; i++)
7885                 *reg++ = SEPARATOR_VALUE;
7886
7887         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
7888         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7889         for (i = 0; i < reg_um; i++)
7890                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
7891         for (i = 0; i < separator_num; i++)
7892                 *reg++ = SEPARATOR_VALUE;
7893
7894         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
7895         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7896         for (j = 0; j < kinfo->num_tqps; j++) {
7897                 for (i = 0; i < reg_um; i++)
7898                         *reg++ = hclge_read_dev(&hdev->hw,
7899                                                 ring_reg_addr_list[i] +
7900                                                 0x200 * j);
7901                 for (i = 0; i < separator_num; i++)
7902                         *reg++ = SEPARATOR_VALUE;
7903         }
7904
7905         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
7906         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7907         for (j = 0; j < hdev->num_msi_used - 1; j++) {
7908                 for (i = 0; i < reg_um; i++)
7909                         *reg++ = hclge_read_dev(&hdev->hw,
7910                                                 tqp_intr_reg_addr_list[i] +
7911                                                 4 * j);
7912                 for (i = 0; i < separator_num; i++)
7913                         *reg++ = SEPARATOR_VALUE;
7914         }
7915
7916         /* fetching PF common register values from firmware */
7917         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
7918         if (ret) {
7919                 dev_err(&hdev->pdev->dev,
7920                         "Get 32 bit register failed, ret = %d.\n", ret);
7921                 return;
7922         }
7923
7924         reg += regs_num_32_bit;
7925         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
7926         if (ret)
7927                 dev_err(&hdev->pdev->dev,
7928                         "Get 64 bit register failed, ret = %d.\n", ret);
7929 }
7930
7931 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
7932 {
7933         struct hclge_set_led_state_cmd *req;
7934         struct hclge_desc desc;
7935         int ret;
7936
7937         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
7938
7939         req = (struct hclge_set_led_state_cmd *)desc.data;
7940         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
7941                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
7942
7943         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7944         if (ret)
7945                 dev_err(&hdev->pdev->dev,
7946                         "Send set led state cmd error, ret =%d\n", ret);
7947
7948         return ret;
7949 }
7950
7951 enum hclge_led_status {
7952         HCLGE_LED_OFF,
7953         HCLGE_LED_ON,
7954         HCLGE_LED_NO_CHANGE = 0xFF,
7955 };
7956
7957 static int hclge_set_led_id(struct hnae3_handle *handle,
7958                             enum ethtool_phys_id_state status)
7959 {
7960         struct hclge_vport *vport = hclge_get_vport(handle);
7961         struct hclge_dev *hdev = vport->back;
7962
7963         switch (status) {
7964         case ETHTOOL_ID_ACTIVE:
7965                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
7966         case ETHTOOL_ID_INACTIVE:
7967                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
7968         default:
7969                 return -EINVAL;
7970         }
7971 }
7972
7973 static void hclge_get_link_mode(struct hnae3_handle *handle,
7974                                 unsigned long *supported,
7975                                 unsigned long *advertising)
7976 {
7977         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
7978         struct hclge_vport *vport = hclge_get_vport(handle);
7979         struct hclge_dev *hdev = vport->back;
7980         unsigned int idx = 0;
7981
7982         for (; idx < size; idx++) {
7983                 supported[idx] = hdev->hw.mac.supported[idx];
7984                 advertising[idx] = hdev->hw.mac.advertising[idx];
7985         }
7986 }
7987
7988 static int hclge_gro_en(struct hnae3_handle *handle, int enable)
7989 {
7990         struct hclge_vport *vport = hclge_get_vport(handle);
7991         struct hclge_dev *hdev = vport->back;
7992
7993         return hclge_config_gro(hdev, enable);
7994 }
7995
7996 static const struct hnae3_ae_ops hclge_ops = {
7997         .init_ae_dev = hclge_init_ae_dev,
7998         .uninit_ae_dev = hclge_uninit_ae_dev,
7999         .flr_prepare = hclge_flr_prepare,
8000         .flr_done = hclge_flr_done,
8001         .init_client_instance = hclge_init_client_instance,
8002         .uninit_client_instance = hclge_uninit_client_instance,
8003         .map_ring_to_vector = hclge_map_ring_to_vector,
8004         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8005         .get_vector = hclge_get_vector,
8006         .put_vector = hclge_put_vector,
8007         .set_promisc_mode = hclge_set_promisc_mode,
8008         .set_loopback = hclge_set_loopback,
8009         .start = hclge_ae_start,
8010         .stop = hclge_ae_stop,
8011         .client_start = hclge_client_start,
8012         .client_stop = hclge_client_stop,
8013         .get_status = hclge_get_status,
8014         .get_ksettings_an_result = hclge_get_ksettings_an_result,
8015         .update_speed_duplex_h = hclge_update_speed_duplex_h,
8016         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8017         .get_media_type = hclge_get_media_type,
8018         .get_rss_key_size = hclge_get_rss_key_size,
8019         .get_rss_indir_size = hclge_get_rss_indir_size,
8020         .get_rss = hclge_get_rss,
8021         .set_rss = hclge_set_rss,
8022         .set_rss_tuple = hclge_set_rss_tuple,
8023         .get_rss_tuple = hclge_get_rss_tuple,
8024         .get_tc_size = hclge_get_tc_size,
8025         .get_mac_addr = hclge_get_mac_addr,
8026         .set_mac_addr = hclge_set_mac_addr,
8027         .do_ioctl = hclge_do_ioctl,
8028         .add_uc_addr = hclge_add_uc_addr,
8029         .rm_uc_addr = hclge_rm_uc_addr,
8030         .add_mc_addr = hclge_add_mc_addr,
8031         .rm_mc_addr = hclge_rm_mc_addr,
8032         .set_autoneg = hclge_set_autoneg,
8033         .get_autoneg = hclge_get_autoneg,
8034         .get_pauseparam = hclge_get_pauseparam,
8035         .set_pauseparam = hclge_set_pauseparam,
8036         .set_mtu = hclge_set_mtu,
8037         .reset_queue = hclge_reset_tqp,
8038         .get_stats = hclge_get_stats,
8039         .update_stats = hclge_update_stats,
8040         .get_strings = hclge_get_strings,
8041         .get_sset_count = hclge_get_sset_count,
8042         .get_fw_version = hclge_get_fw_version,
8043         .get_mdix_mode = hclge_get_mdix_mode,
8044         .enable_vlan_filter = hclge_enable_vlan_filter,
8045         .set_vlan_filter = hclge_set_vlan_filter,
8046         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8047         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8048         .reset_event = hclge_reset_event,
8049         .set_default_reset_request = hclge_set_def_reset_request,
8050         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8051         .set_channels = hclge_set_channels,
8052         .get_channels = hclge_get_channels,
8053         .get_regs_len = hclge_get_regs_len,
8054         .get_regs = hclge_get_regs,
8055         .set_led_id = hclge_set_led_id,
8056         .get_link_mode = hclge_get_link_mode,
8057         .add_fd_entry = hclge_add_fd_entry,
8058         .del_fd_entry = hclge_del_fd_entry,
8059         .del_all_fd_entries = hclge_del_all_fd_entries,
8060         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8061         .get_fd_rule_info = hclge_get_fd_rule_info,
8062         .get_fd_all_rules = hclge_get_all_rules,
8063         .restore_fd_rules = hclge_restore_fd_entries,
8064         .enable_fd = hclge_enable_fd,
8065         .dbg_run_cmd = hclge_dbg_run_cmd,
8066         .handle_hw_ras_error = hclge_handle_hw_ras_error,
8067         .get_hw_reset_stat = hclge_get_hw_reset_stat,
8068         .ae_dev_resetting = hclge_ae_dev_resetting,
8069         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8070         .set_gro_en = hclge_gro_en,
8071         .get_global_queue_id = hclge_covert_handle_qid_global,
8072         .set_timer_task = hclge_set_timer_task,
8073         .mac_connect_phy = hclge_mac_connect_phy,
8074         .mac_disconnect_phy = hclge_mac_disconnect_phy,
8075 };
8076
8077 static struct hnae3_ae_algo ae_algo = {
8078         .ops = &hclge_ops,
8079         .pdev_id_table = ae_algo_pci_tbl,
8080 };
8081
8082 static int hclge_init(void)
8083 {
8084         pr_info("%s is initializing\n", HCLGE_NAME);
8085
8086         hnae3_register_ae_algo(&ae_algo);
8087
8088         return 0;
8089 }
8090
8091 static void hclge_exit(void)
8092 {
8093         hnae3_unregister_ae_algo(&ae_algo);
8094 }
8095 module_init(hclge_init);
8096 module_exit(hclge_exit);
8097
8098 MODULE_LICENSE("GPL");
8099 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8100 MODULE_DESCRIPTION("HCLGE Driver");
8101 MODULE_VERSION(HCLGE_MOD_VERSION);