drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hnae3.h"
23
24 #define HCLGE_NAME                      "hclge"
25 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
26 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
27
28 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
29                                      enum hclge_mta_dmac_sel_type mta_mac_sel,
30                                      bool enable);
31 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
32 static int hclge_init_vlan_config(struct hclge_dev *hdev);
33 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
34
35 static struct hnae3_ae_algo ae_algo;
36
37 static const struct pci_device_id ae_algo_pci_tbl[] = {
38         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
39         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
40         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
41         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
45         /* required last entry */
46         {0, }
47 };
48
49 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
50
51 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
52         "App    Loopback test",
53         "Serdes serial Loopback test",
54         "Serdes parallel Loopback test",
55         "Phy    Loopback test"
56 };
57
58 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
59         {"mac_tx_mac_pause_num",
60                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
61         {"mac_rx_mac_pause_num",
62                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
63         {"mac_tx_pfc_pri0_pkt_num",
64                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
65         {"mac_tx_pfc_pri1_pkt_num",
66                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
67         {"mac_tx_pfc_pri2_pkt_num",
68                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
69         {"mac_tx_pfc_pri3_pkt_num",
70                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
71         {"mac_tx_pfc_pri4_pkt_num",
72                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
73         {"mac_tx_pfc_pri5_pkt_num",
74                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
75         {"mac_tx_pfc_pri6_pkt_num",
76                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
77         {"mac_tx_pfc_pri7_pkt_num",
78                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
79         {"mac_rx_pfc_pri0_pkt_num",
80                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
81         {"mac_rx_pfc_pri1_pkt_num",
82                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
83         {"mac_rx_pfc_pri2_pkt_num",
84                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
85         {"mac_rx_pfc_pri3_pkt_num",
86                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
87         {"mac_rx_pfc_pri4_pkt_num",
88                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
89         {"mac_rx_pfc_pri5_pkt_num",
90                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
91         {"mac_rx_pfc_pri6_pkt_num",
92                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
93         {"mac_rx_pfc_pri7_pkt_num",
94                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
95         {"mac_tx_total_pkt_num",
96                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
97         {"mac_tx_total_oct_num",
98                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
99         {"mac_tx_good_pkt_num",
100                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
101         {"mac_tx_bad_pkt_num",
102                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
103         {"mac_tx_good_oct_num",
104                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
105         {"mac_tx_bad_oct_num",
106                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
107         {"mac_tx_uni_pkt_num",
108                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
109         {"mac_tx_multi_pkt_num",
110                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
111         {"mac_tx_broad_pkt_num",
112                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
113         {"mac_tx_undersize_pkt_num",
114                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
115         {"mac_tx_oversize_pkt_num",
116                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
117         {"mac_tx_64_oct_pkt_num",
118                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
119         {"mac_tx_65_127_oct_pkt_num",
120                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
121         {"mac_tx_128_255_oct_pkt_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
123         {"mac_tx_256_511_oct_pkt_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
125         {"mac_tx_512_1023_oct_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
127         {"mac_tx_1024_1518_oct_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
129         {"mac_tx_1519_2047_oct_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
131         {"mac_tx_2048_4095_oct_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
133         {"mac_tx_4096_8191_oct_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
135         {"mac_tx_8192_9216_oct_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
137         {"mac_tx_9217_12287_oct_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
139         {"mac_tx_12288_16383_oct_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
141         {"mac_tx_1519_max_good_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
143         {"mac_tx_1519_max_bad_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
145         {"mac_rx_total_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
147         {"mac_rx_total_oct_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
149         {"mac_rx_good_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
151         {"mac_rx_bad_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
153         {"mac_rx_good_oct_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
155         {"mac_rx_bad_oct_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
157         {"mac_rx_uni_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
159         {"mac_rx_multi_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
161         {"mac_rx_broad_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
163         {"mac_rx_undersize_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
165         {"mac_rx_oversize_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
167         {"mac_rx_64_oct_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
169         {"mac_rx_65_127_oct_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
171         {"mac_rx_128_255_oct_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
173         {"mac_rx_256_511_oct_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
175         {"mac_rx_512_1023_oct_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
177         {"mac_rx_1024_1518_oct_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
179         {"mac_rx_1519_2047_oct_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
181         {"mac_rx_2048_4095_oct_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
183         {"mac_rx_4096_8191_oct_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
185         {"mac_rx_8192_9216_oct_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
187         {"mac_rx_9217_12287_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
189         {"mac_rx_12288_16383_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
191         {"mac_rx_1519_max_good_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
193         {"mac_rx_1519_max_bad_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
195
196         {"mac_tx_fragment_pkt_num",
197                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
198         {"mac_tx_undermin_pkt_num",
199                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
200         {"mac_tx_jabber_pkt_num",
201                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
202         {"mac_tx_err_all_pkt_num",
203                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
204         {"mac_tx_from_app_good_pkt_num",
205                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
206         {"mac_tx_from_app_bad_pkt_num",
207                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
208         {"mac_rx_fragment_pkt_num",
209                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
210         {"mac_rx_undermin_pkt_num",
211                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
212         {"mac_rx_jabber_pkt_num",
213                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
214         {"mac_rx_fcs_err_pkt_num",
215                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
216         {"mac_rx_send_app_good_pkt_num",
217                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
218         {"mac_rx_send_app_bad_pkt_num",
219                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
220 };
221
222 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
223         {
224                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
225                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
226                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
227                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
228                 .i_port_bitmap = 0x1,
229         },
230 };
231
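/* Read all MAC statistics with one HCLGE_OPC_STATS_MAC command (a chain of
 * HCLGE_MAC_CMD_NUM descriptors) and accumulate the returned 64-bit counters
 * into hdev->hw_stats.mac_stats. The first descriptor carries fewer counters
 * because part of it is occupied by the command header.
 */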
232 static int hclge_mac_update_stats(struct hclge_dev *hdev)
233 {
234 #define HCLGE_MAC_CMD_NUM 21
235 #define HCLGE_RTN_DATA_NUM 4
236
237         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
238         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
239         __le64 *desc_data;
240         int i, k, n;
241         int ret;
242
243         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
244         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
245         if (ret) {
246                 dev_err(&hdev->pdev->dev,
247                         "Get MAC pkt stats fail, status = %d.\n", ret);
248
249                 return ret;
250         }
251
252         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
253                 if (unlikely(i == 0)) {
254                         desc_data = (__le64 *)(&desc[i].data[0]);
255                         n = HCLGE_RTN_DATA_NUM - 2;
256                 } else {
257                         desc_data = (__le64 *)(&desc[i]);
258                         n = HCLGE_RTN_DATA_NUM;
259                 }
260                 for (k = 0; k < n; k++) {
261                         *data++ += le64_to_cpu(*desc_data);
262                         desc_data++;
263                 }
264         }
265
266         return 0;
267 }
268
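/* Query the RX and TX packet counters of every TQP owned by this handle,
 * one command per queue, and accumulate them into the per-queue software
 * statistics.
 */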
269 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
270 {
271         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
272         struct hclge_vport *vport = hclge_get_vport(handle);
273         struct hclge_dev *hdev = vport->back;
274         struct hnae3_queue *queue;
275         struct hclge_desc desc[1];
276         struct hclge_tqp *tqp;
277         int ret, i;
278
279         for (i = 0; i < kinfo->num_tqps; i++) {
280                 queue = handle->kinfo.tqp[i];
281                 tqp = container_of(queue, struct hclge_tqp, q);
282                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
283                 hclge_cmd_setup_basic_desc(&desc[0],
284                                            HCLGE_OPC_QUERY_RX_STATUS,
285                                            true);
286
287                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
288                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
289                 if (ret) {
290                         dev_err(&hdev->pdev->dev,
291                                 "Query tqp stat fail, status = %d, queue = %d\n",
292                                 ret, i);
293                         return ret;
294                 }
295                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
296                         le32_to_cpu(desc[0].data[1]);
297         }
298
299         for (i = 0; i < kinfo->num_tqps; i++) {
300                 queue = handle->kinfo.tqp[i];
301                 tqp = container_of(queue, struct hclge_tqp, q);
302                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
303                 hclge_cmd_setup_basic_desc(&desc[0],
304                                            HCLGE_OPC_QUERY_TX_STATUS,
305                                            true);
306
307                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
308                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
309                 if (ret) {
310                         dev_err(&hdev->pdev->dev,
311                                 "Query tqp stat fail, status = %d, queue = %d\n",
312                                 ret, i);
313                         return ret;
314                 }
315                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
316                         le32_to_cpu(desc[0].data[1]);
317         }
318
319         return 0;
320 }
321
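/* Copy the accumulated per-queue packet counters into the ethtool statistics
 * buffer, all TX queues first and then all RX queues, and return the position
 * just after the last value written.
 */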
322 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
323 {
324         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
325         struct hclge_tqp *tqp;
326         u64 *buff = data;
327         int i;
328
329         for (i = 0; i < kinfo->num_tqps; i++) {
330                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
331                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
332         }
333
334         for (i = 0; i < kinfo->num_tqps; i++) {
335                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
336                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
337         }
338
339         return buff;
340 }
341
342 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
343 {
344         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
345
346         return kinfo->num_tqps * 2;
347 }
348
349 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
350 {
351         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
352         u8 *buff = data;
353         int i = 0;
354
355         for (i = 0; i < kinfo->num_tqps; i++) {
356                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
357                         struct hclge_tqp, q);
358                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
359                          tqp->index);
360                 buff = buff + ETH_GSTRING_LEN;
361         }
362
363         for (i = 0; i < kinfo->num_tqps; i++) {
364                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
365                         struct hclge_tqp, q);
366                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
367                          tqp->index);
368                 buff = buff + ETH_GSTRING_LEN;
369         }
370
371         return buff;
372 }
373
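/* Gather @size counters from a statistics structure, using the field offsets
 * recorded in the string table @strs, and return the buffer position that
 * follows them.
 */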
374 static u64 *hclge_comm_get_stats(void *comm_stats,
375                                  const struct hclge_comm_stats_str strs[],
376                                  int size, u64 *data)
377 {
378         u64 *buf = data;
379         u32 i;
380
381         for (i = 0; i < size; i++)
382                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
383
384         return buf + size;
385 }
386
387 static u8 *hclge_comm_get_strings(u32 stringset,
388                                   const struct hclge_comm_stats_str strs[],
389                                   int size, u8 *data)
390 {
391         char *buff = (char *)data;
392         u32 i;
393
394         if (stringset != ETH_SS_STATS)
395                 return buff;
396
397         for (i = 0; i < size; i++) {
398                 snprintf(buff, ETH_GSTRING_LEN, "%s",
399                          strs[i].desc);
400                 buff = buff + ETH_GSTRING_LEN;
401         }
402
403         return (u8 *)buff;
404 }
405
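/* Derive the generic netdev statistics (rx_errors, multicast, CRC/length/over
 * errors) from the raw MAC counters; tx_dropped is simply reported as zero
 * here.
 */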
406 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
407                                  struct net_device_stats *net_stats)
408 {
409         net_stats->tx_dropped = 0;
410         net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
411         net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
412         net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
413
414         net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
415         net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
416
417         net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
418         net_stats->rx_length_errors =
419                 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
420         net_stats->rx_length_errors +=
421                 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
422         net_stats->rx_over_errors =
423                 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
424 }
425
426 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
427 {
428         struct hnae3_handle *handle;
429         int status;
430
431         handle = &hdev->vport[0].nic;
432         if (handle->client) {
433                 status = hclge_tqps_update_stats(handle);
434                 if (status) {
435                         dev_err(&hdev->pdev->dev,
436                                 "Update TQPS stats fail, status = %d.\n",
437                                 status);
438                 }
439         }
440
441         status = hclge_mac_update_stats(hdev);
442         if (status)
443                 dev_err(&hdev->pdev->dev,
444                         "Update MAC stats fail, status = %d.\n", status);
445
446         hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
447 }
448
449 static void hclge_update_stats(struct hnae3_handle *handle,
450                                struct net_device_stats *net_stats)
451 {
452         struct hclge_vport *vport = hclge_get_vport(handle);
453         struct hclge_dev *hdev = vport->back;
454         struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
455         int status;
456
457         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
458                 return;
459
460         status = hclge_mac_update_stats(hdev);
461         if (status)
462                 dev_err(&hdev->pdev->dev,
463                         "Update MAC stats fail, status = %d.\n",
464                         status);
465
466         status = hclge_tqps_update_stats(handle);
467         if (status)
468                 dev_err(&hdev->pdev->dev,
469                         "Update TQPS stats fail, status = %d.\n",
470                         status);
471
472         hclge_update_netstat(hw_stats, net_stats);
473
474         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
475 }
476
477 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
478 {
479 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
480                 HNAE3_SUPPORT_PHY_LOOPBACK |\
481                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
482                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
483
484         struct hclge_vport *vport = hclge_get_vport(handle);
485         struct hclge_dev *hdev = vport->back;
486         int count = 0;
487
488         /* Loopback test support rules:
489          * mac: supported only in GE mode
490          * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
491          * phy: supported only when a PHY device exists on the board
492          */
493         if (stringset == ETH_SS_TEST) {
494                 /* clear loopback bit flags at first */
495                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
496                 if (hdev->pdev->revision >= 0x21 ||
497                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
498                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
499                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
500                         count += 1;
501                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
502                 }
503
504                 count += 2;
505                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
506                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
507         } else if (stringset == ETH_SS_STATS) {
508                 count = ARRAY_SIZE(g_mac_stats_string) +
509                         hclge_tqps_get_sset_count(handle, stringset);
510         }
511
512         return count;
513 }
514
515 static void hclge_get_strings(struct hnae3_handle *handle,
516                               u32 stringset,
517                               u8 *data)
518 {
519         u8 *p = data;
520         int size;
521
522         if (stringset == ETH_SS_STATS) {
523                 size = ARRAY_SIZE(g_mac_stats_string);
524                 p = hclge_comm_get_strings(stringset,
525                                            g_mac_stats_string,
526                                            size,
527                                            p);
528                 p = hclge_tqps_get_strings(handle, p);
529         } else if (stringset == ETH_SS_TEST) {
530                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
531                         memcpy(p,
532                                hns3_nic_test_strs[HNAE3_LOOP_APP],
533                                ETH_GSTRING_LEN);
534                         p += ETH_GSTRING_LEN;
535                 }
536                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
537                         memcpy(p,
538                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
539                                ETH_GSTRING_LEN);
540                         p += ETH_GSTRING_LEN;
541                 }
542                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
543                         memcpy(p,
544                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
545                                ETH_GSTRING_LEN);
546                         p += ETH_GSTRING_LEN;
547                 }
548                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
549                         memcpy(p,
550                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
551                                ETH_GSTRING_LEN);
552                         p += ETH_GSTRING_LEN;
553                 }
554         }
555 }
556
557 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
558 {
559         struct hclge_vport *vport = hclge_get_vport(handle);
560         struct hclge_dev *hdev = vport->back;
561         u64 *p;
562
563         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
564                                  g_mac_stats_string,
565                                  ARRAY_SIZE(g_mac_stats_string),
566                                  data);
567         p = hclge_tqps_get_stats(handle, p);
568 }
569
570 static int hclge_parse_func_status(struct hclge_dev *hdev,
571                                    struct hclge_func_status_cmd *status)
572 {
573         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
574                 return -EINVAL;
575
576         /* Set the pf to main pf */
577         if (status->pf_state & HCLGE_PF_STATE_MAIN)
578                 hdev->flag |= HCLGE_FLAG_MAIN;
579         else
580                 hdev->flag &= ~HCLGE_FLAG_MAIN;
581
582         return 0;
583 }
584
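/* Query the function status from firmware, retrying briefly until the PF
 * state is reported (i.e. PF reset has completed), then record whether this
 * PF is the main PF.
 */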
585 static int hclge_query_function_status(struct hclge_dev *hdev)
586 {
587         struct hclge_func_status_cmd *req;
588         struct hclge_desc desc;
589         int timeout = 0;
590         int ret;
591
592         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
593         req = (struct hclge_func_status_cmd *)desc.data;
594
595         do {
596                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
597                 if (ret) {
598                         dev_err(&hdev->pdev->dev,
599                                 "query function status failed %d.\n",
600                                 ret);
601
602                         return ret;
603                 }
604
605                 /* Check pf reset is done */
606                 if (req->pf_state)
607                         break;
608                 usleep_range(1000, 2000);
609         } while (timeout++ < 5);
610
611         ret = hclge_parse_func_status(hdev, req);
612
613         return ret;
614 }
615
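/* Read the resources assigned to this PF: number of TQPs, packet buffer size
 * and the MSI-X vector count, including the RoCE vector offset when RoCE is
 * supported.
 */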
616 static int hclge_query_pf_resource(struct hclge_dev *hdev)
617 {
618         struct hclge_pf_res_cmd *req;
619         struct hclge_desc desc;
620         int ret;
621
622         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
623         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
624         if (ret) {
625                 dev_err(&hdev->pdev->dev,
626                         "query pf resource failed %d.\n", ret);
627                 return ret;
628         }
629
630         req = (struct hclge_pf_res_cmd *)desc.data;
631         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
632         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
633
634         if (hnae3_dev_roce_supported(hdev)) {
635                 hdev->roce_base_msix_offset =
636                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
637                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
638                 hdev->num_roce_msi =
639                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
640                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
641
642                 /* The PF should have both NIC vectors and RoCE vectors;
643                  * the NIC vectors are queued before the RoCE vectors.
644                  */
645                 hdev->num_msi = hdev->num_roce_msi  +
646                                 hdev->roce_base_msix_offset;
647         } else {
648                 hdev->num_msi =
649                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
650                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
651         }
652
653         return 0;
654 }
655
656 static int hclge_parse_speed(int speed_cmd, int *speed)
657 {
658         switch (speed_cmd) {
659         case 6:
660                 *speed = HCLGE_MAC_SPEED_10M;
661                 break;
662         case 7:
663                 *speed = HCLGE_MAC_SPEED_100M;
664                 break;
665         case 0:
666                 *speed = HCLGE_MAC_SPEED_1G;
667                 break;
668         case 1:
669                 *speed = HCLGE_MAC_SPEED_10G;
670                 break;
671         case 2:
672                 *speed = HCLGE_MAC_SPEED_25G;
673                 break;
674         case 3:
675                 *speed = HCLGE_MAC_SPEED_40G;
676                 break;
677         case 4:
678                 *speed = HCLGE_MAC_SPEED_50G;
679                 break;
680         case 5:
681                 *speed = HCLGE_MAC_SPEED_100G;
682                 break;
683         default:
684                 return -EINVAL;
685         }
686
687         return 0;
688 }
689
690 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
691                                         u8 speed_ability)
692 {
693         unsigned long *supported = hdev->hw.mac.supported;
694
695         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
696                 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
697                         supported);
698
699         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
700                 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
701                         supported);
702
703         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
704                 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
705                         supported);
706
707         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
708                 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
709                         supported);
710
711         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
712                 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
713                         supported);
714
715         set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
716         set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
717 }
718
719 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
720 {
721         u8 media_type = hdev->hw.mac.media_type;
722
723         if (media_type != HNAE3_MEDIA_TYPE_FIBER)
724                 return;
725
726         hclge_parse_fiber_link_mode(hdev, speed_ability);
727 }
728
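/* Unpack the fields returned by the two HCLGE_OPC_GET_CFG_PARAM descriptors:
 * VMDq vport number, TC number, descriptor number, PHY address, media type,
 * RX buffer length, the MAC address (low 32 bits in param[2], remaining high
 * bits in param[3]), default speed, maximum RSS size, NUMA node map and
 * speed ability.
 */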
729 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
730 {
731         struct hclge_cfg_param_cmd *req;
732         u64 mac_addr_tmp_high;
733         u64 mac_addr_tmp;
734         int i;
735
736         req = (struct hclge_cfg_param_cmd *)desc[0].data;
737
738         /* get the configuration */
739         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
740                                               HCLGE_CFG_VMDQ_M,
741                                               HCLGE_CFG_VMDQ_S);
742         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
743                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
744         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
745                                             HCLGE_CFG_TQP_DESC_N_M,
746                                             HCLGE_CFG_TQP_DESC_N_S);
747
748         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
749                                         HCLGE_CFG_PHY_ADDR_M,
750                                         HCLGE_CFG_PHY_ADDR_S);
751         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
752                                           HCLGE_CFG_MEDIA_TP_M,
753                                           HCLGE_CFG_MEDIA_TP_S);
754         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
755                                           HCLGE_CFG_RX_BUF_LEN_M,
756                                           HCLGE_CFG_RX_BUF_LEN_S);
757         /* get mac_address */
758         mac_addr_tmp = __le32_to_cpu(req->param[2]);
759         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
760                                             HCLGE_CFG_MAC_ADDR_H_M,
761                                             HCLGE_CFG_MAC_ADDR_H_S);
762
763         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
764
765         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
766                                              HCLGE_CFG_DEFAULT_SPEED_M,
767                                              HCLGE_CFG_DEFAULT_SPEED_S);
768         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
769                                             HCLGE_CFG_RSS_SIZE_M,
770                                             HCLGE_CFG_RSS_SIZE_S);
771
772         for (i = 0; i < ETH_ALEN; i++)
773                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
774
775         req = (struct hclge_cfg_param_cmd *)desc[1].data;
776         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
777
778         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
779                                              HCLGE_CFG_SPEED_ABILITY_M,
780                                              HCLGE_CFG_SPEED_ABILITY_S);
781 }
782
783 /* hclge_get_cfg: query the static parameters from flash
784  * @hdev: pointer to struct hclge_dev
785  * @hcfg: the config structure to be filled
786  */
787 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
788 {
789         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
790         struct hclge_cfg_param_cmd *req;
791         int i, ret;
792
793         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
794                 u32 offset = 0;
795
796                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
797                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
798                                            true);
799                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
800                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
801                 /* Length must be in units of 4 bytes when sent to hardware */
802                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
803                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
804                 req->offset = cpu_to_le32(offset);
805         }
806
807         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
808         if (ret) {
809                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
810                 return ret;
811         }
812
813         hclge_parse_cfg(hcfg, desc);
814
815         return 0;
816 }
817
818 static int hclge_get_cap(struct hclge_dev *hdev)
819 {
820         int ret;
821
822         ret = hclge_query_function_status(hdev);
823         if (ret) {
824                 dev_err(&hdev->pdev->dev,
825                         "query function status error %d.\n", ret);
826                 return ret;
827         }
828
829         /* get pf resource */
830         ret = hclge_query_pf_resource(hdev);
831         if (ret)
832                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
833
834         return ret;
835 }
836
837 static int hclge_configure(struct hclge_dev *hdev)
838 {
839         struct hclge_cfg cfg;
840         int ret, i;
841
842         ret = hclge_get_cfg(hdev, &cfg);
843         if (ret) {
844                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
845                 return ret;
846         }
847
848         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
849         hdev->base_tqp_pid = 0;
850         hdev->rss_size_max = cfg.rss_size_max;
851         hdev->rx_buf_len = cfg.rx_buf_len;
852         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
853         hdev->hw.mac.media_type = cfg.media_type;
854         hdev->hw.mac.phy_addr = cfg.phy_addr;
855         hdev->num_desc = cfg.tqp_desc_num;
856         hdev->tm_info.num_pg = 1;
857         hdev->tc_max = cfg.tc_num;
858         hdev->tm_info.hw_pfc_map = 0;
859
860         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
861         if (ret) {
862                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
863                 return ret;
864         }
865
866         hclge_parse_link_mode(hdev, cfg.speed_ability);
867
868         if ((hdev->tc_max > HNAE3_MAX_TC) ||
869             (hdev->tc_max < 1)) {
870                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
871                          hdev->tc_max);
872                 hdev->tc_max = 1;
873         }
874
875         /* Dev does not support DCB */
876         if (!hnae3_dev_dcb_supported(hdev)) {
877                 hdev->tc_max = 1;
878                 hdev->pfc_max = 0;
879         } else {
880                 hdev->pfc_max = hdev->tc_max;
881         }
882
883         hdev->tm_info.num_tc = hdev->tc_max;
884
885         /* Currently, discontiguous TCs are not supported */
886         for (i = 0; i < hdev->tm_info.num_tc; i++)
887                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
888
889         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
890
891         return ret;
892 }
893
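/* Program the minimum and maximum TSO MSS limits into the hardware with the
 * HCLGE_OPC_TSO_GENERIC_CONFIG command.
 */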
894 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
895                             int tso_mss_max)
896 {
897         struct hclge_cfg_tso_status_cmd *req;
898         struct hclge_desc desc;
899         u16 tso_mss;
900
901         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
902
903         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
904
905         tso_mss = 0;
906         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
907                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
908         req->tso_mss_min = cpu_to_le16(tso_mss);
909
910         tso_mss = 0;
911         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
912                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
913         req->tso_mss_max = cpu_to_le16(tso_mss);
914
915         return hclge_cmd_send(&hdev->hw, &desc, 1);
916 }
917
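/* Allocate the hclge_tqp array covering every TQP of this PF and point each
 * queue at its own register window (HCLGE_TQP_REG_OFFSET + i *
 * HCLGE_TQP_REG_SIZE) inside the PF's I/O base.
 */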
918 static int hclge_alloc_tqps(struct hclge_dev *hdev)
919 {
920         struct hclge_tqp *tqp;
921         int i;
922
923         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
924                                   sizeof(struct hclge_tqp), GFP_KERNEL);
925         if (!hdev->htqp)
926                 return -ENOMEM;
927
928         tqp = hdev->htqp;
929
930         for (i = 0; i < hdev->num_tqps; i++) {
931                 tqp->dev = &hdev->pdev->dev;
932                 tqp->index = i;
933
934                 tqp->q.ae_algo = &ae_algo;
935                 tqp->q.buf_size = hdev->rx_buf_len;
936                 tqp->q.desc_num = hdev->num_desc;
937                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
938                         i * HCLGE_TQP_REG_SIZE;
939
940                 tqp++;
941         }
942
943         return 0;
944 }
945
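/* Map a physical queue (tqp_pid) to a function (PF or VF, selected by
 * func_id/is_pf) as its virtual queue tqp_vid using the HCLGE_OPC_SET_TQP_MAP
 * command.
 */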
946 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
947                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
948 {
949         struct hclge_tqp_map_cmd *req;
950         struct hclge_desc desc;
951         int ret;
952
953         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
954
955         req = (struct hclge_tqp_map_cmd *)desc.data;
956         req->tqp_id = cpu_to_le16(tqp_pid);
957         req->tqp_vf = func_id;
958         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
959                         1 << HCLGE_TQP_MAP_EN_B;
960         req->tqp_vid = cpu_to_le16(tqp_vid);
961
962         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
963         if (ret)
964                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
965
966         return ret;
967 }
968
969 static int  hclge_assign_tqp(struct hclge_vport *vport)
970 {
971         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
972         struct hclge_dev *hdev = vport->back;
973         int i, alloced;
974
975         for (i = 0, alloced = 0; i < hdev->num_tqps &&
976              alloced < kinfo->num_tqps; i++) {
977                 if (!hdev->htqp[i].alloced) {
978                         hdev->htqp[i].q.handle = &vport->nic;
979                         hdev->htqp[i].q.tqp_index = alloced;
980                         hdev->htqp[i].q.desc_num = kinfo->num_desc;
981                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
982                         hdev->htqp[i].alloced = true;
983                         alloced++;
984                 }
985         }
986         vport->alloc_tqps = kinfo->num_tqps;
987
988         return 0;
989 }
990
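/* Set up the KNIC part of a vport: compute rss_size and num_tqps from the
 * enabled TC map, fill the per-TC queue offsets and counts, allocate the
 * queue pointer array and assign queues out of the PF's pool.
 */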
991 static int hclge_knic_setup(struct hclge_vport *vport,
992                             u16 num_tqps, u16 num_desc)
993 {
994         struct hnae3_handle *nic = &vport->nic;
995         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
996         struct hclge_dev *hdev = vport->back;
997         int i, ret;
998
999         kinfo->num_desc = num_desc;
1000         kinfo->rx_buf_len = hdev->rx_buf_len;
1001         kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1002         kinfo->rss_size
1003                 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1004         kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1005
1006         for (i = 0; i < HNAE3_MAX_TC; i++) {
1007                 if (hdev->hw_tc_map & BIT(i)) {
1008                         kinfo->tc_info[i].enable = true;
1009                         kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1010                         kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1011                         kinfo->tc_info[i].tc = i;
1012                 } else {
1013                         /* Set to the default queue if the TC is disabled */
1014                         kinfo->tc_info[i].enable = false;
1015                         kinfo->tc_info[i].tqp_offset = 0;
1016                         kinfo->tc_info[i].tqp_count = 1;
1017                         kinfo->tc_info[i].tc = 0;
1018                 }
1019         }
1020
1021         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1022                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1023         if (!kinfo->tqp)
1024                 return -ENOMEM;
1025
1026         ret = hclge_assign_tqp(vport);
1027         if (ret)
1028                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1029
1030         return ret;
1031 }
1032
1033 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1034                                   struct hclge_vport *vport)
1035 {
1036         struct hnae3_handle *nic = &vport->nic;
1037         struct hnae3_knic_private_info *kinfo;
1038         u16 i;
1039
1040         kinfo = &nic->kinfo;
1041         for (i = 0; i < kinfo->num_tqps; i++) {
1042                 struct hclge_tqp *q =
1043                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1044                 bool is_pf;
1045                 int ret;
1046
1047                 is_pf = !(vport->vport_id);
1048                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1049                                              i, is_pf);
1050                 if (ret)
1051                         return ret;
1052         }
1053
1054         return 0;
1055 }
1056
1057 static int hclge_map_tqp(struct hclge_dev *hdev)
1058 {
1059         struct hclge_vport *vport = hdev->vport;
1060         u16 i, num_vport;
1061
1062         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1063         for (i = 0; i < num_vport; i++) {
1064                 int ret;
1065
1066                 ret = hclge_map_tqp_to_vport(hdev, vport);
1067                 if (ret)
1068                         return ret;
1069
1070                 vport++;
1071         }
1072
1073         return 0;
1074 }
1075
1076 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1077 {
1078         /* this would be initialized later */
1079 }
1080
1081 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1082 {
1083         struct hnae3_handle *nic = &vport->nic;
1084         struct hclge_dev *hdev = vport->back;
1085         int ret;
1086
1087         nic->pdev = hdev->pdev;
1088         nic->ae_algo = &ae_algo;
1089         nic->numa_node_mask = hdev->numa_node_mask;
1090
1091         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1092                 ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1093                 if (ret) {
1094                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1095                                 ret);
1096                         return ret;
1097                 }
1098         } else {
1099                 hclge_unic_setup(vport, num_tqps);
1100         }
1101
1102         return 0;
1103 }
1104
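/* Allocate one vport for the PF's main NIC plus one per VMDq instance and
 * requested VF; the TQPs are split evenly between the vports, with any
 * remainder going to the main vport.
 */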
1105 static int hclge_alloc_vport(struct hclge_dev *hdev)
1106 {
1107         struct pci_dev *pdev = hdev->pdev;
1108         struct hclge_vport *vport;
1109         u32 tqp_main_vport;
1110         u32 tqp_per_vport;
1111         int num_vport, i;
1112         int ret;
1113
1114         /* We need to alloc a vport for the main NIC of the PF */
1115         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1116
1117         if (hdev->num_tqps < num_vport) {
1118                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1119                         hdev->num_tqps, num_vport);
1120                 return -EINVAL;
1121         }
1122
1123         /* Alloc the same number of TQPs for every vport */
1124         tqp_per_vport = hdev->num_tqps / num_vport;
1125         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1126
1127         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1128                              GFP_KERNEL);
1129         if (!vport)
1130                 return -ENOMEM;
1131
1132         hdev->vport = vport;
1133         hdev->num_alloc_vport = num_vport;
1134
1135         if (IS_ENABLED(CONFIG_PCI_IOV))
1136                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1137
1138         for (i = 0; i < num_vport; i++) {
1139                 vport->back = hdev;
1140                 vport->vport_id = i;
1141
1142                 if (i == 0)
1143                         ret = hclge_vport_setup(vport, tqp_main_vport);
1144                 else
1145                         ret = hclge_vport_setup(vport, tqp_per_vport);
1146                 if (ret) {
1147                         dev_err(&pdev->dev,
1148                                 "vport setup failed for vport %d, %d\n",
1149                                 i, ret);
1150                         return ret;
1151                 }
1152
1153                 vport++;
1154         }
1155
1156         return 0;
1157 }
1158
1159 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1160                                     struct hclge_pkt_buf_alloc *buf_alloc)
1161 {
1162 /* TX buffer size is in units of 128 bytes */
1163 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1164 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1165         struct hclge_tx_buff_alloc_cmd *req;
1166         struct hclge_desc desc;
1167         int ret;
1168         u8 i;
1169
1170         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1171
1172         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1173         for (i = 0; i < HCLGE_TC_NUM; i++) {
1174                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1175
1176                 req->tx_pkt_buff[i] =
1177                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1178                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1179         }
1180
1181         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1182         if (ret)
1183                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1184                         ret);
1185
1186         return ret;
1187 }
1188
1189 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1190                                  struct hclge_pkt_buf_alloc *buf_alloc)
1191 {
1192         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1193
1194         if (ret)
1195                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1196
1197         return ret;
1198 }
1199
1200 static int hclge_get_tc_num(struct hclge_dev *hdev)
1201 {
1202         int i, cnt = 0;
1203
1204         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1205                 if (hdev->hw_tc_map & BIT(i))
1206                         cnt++;
1207         return cnt;
1208 }
1209
1210 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1211 {
1212         int i, cnt = 0;
1213
1214         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1215                 if (hdev->hw_tc_map & BIT(i) &&
1216                     hdev->tm_info.hw_pfc_map & BIT(i))
1217                         cnt++;
1218         return cnt;
1219 }
1220
1221 /* Get the number of pfc enabled TCs, which have private buffer */
1222 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1223                                   struct hclge_pkt_buf_alloc *buf_alloc)
1224 {
1225         struct hclge_priv_buf *priv;
1226         int i, cnt = 0;
1227
1228         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1229                 priv = &buf_alloc->priv_buf[i];
1230                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1231                     priv->enable)
1232                         cnt++;
1233         }
1234
1235         return cnt;
1236 }
1237
1238 /* Get the number of pfc disabled TCs, which have private buffer */
1239 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1240                                      struct hclge_pkt_buf_alloc *buf_alloc)
1241 {
1242         struct hclge_priv_buf *priv;
1243         int i, cnt = 0;
1244
1245         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1246                 priv = &buf_alloc->priv_buf[i];
1247                 if (hdev->hw_tc_map & BIT(i) &&
1248                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1249                     priv->enable)
1250                         cnt++;
1251         }
1252
1253         return cnt;
1254 }
1255
1256 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1257 {
1258         struct hclge_priv_buf *priv;
1259         u32 rx_priv = 0;
1260         int i;
1261
1262         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1263                 priv = &buf_alloc->priv_buf[i];
1264                 if (priv->enable)
1265                         rx_priv += priv->buf_size;
1266         }
1267         return rx_priv;
1268 }
1269
1270 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1271 {
1272         u32 i, total_tx_size = 0;
1273
1274         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1275                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1276
1277         return total_tx_size;
1278 }
1279
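/* Check whether the RX buffer left over after the private per-TC allocations
 * can hold the required shared buffer. If it can, record the shared buffer
 * size and the per-TC high/low thresholds and return true; otherwise return
 * false.
 */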
1280 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1281                                 struct hclge_pkt_buf_alloc *buf_alloc,
1282                                 u32 rx_all)
1283 {
1284         u32 shared_buf_min, shared_buf_tc, shared_std;
1285         int tc_num, pfc_enable_num;
1286         u32 shared_buf;
1287         u32 rx_priv;
1288         int i;
1289
1290         tc_num = hclge_get_tc_num(hdev);
1291         pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1292
1293         if (hnae3_dev_dcb_supported(hdev))
1294                 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1295         else
1296                 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1297
1298         shared_buf_tc = pfc_enable_num * hdev->mps +
1299                         (tc_num - pfc_enable_num) * hdev->mps / 2 +
1300                         hdev->mps;
1301         shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1302
1303         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1304         if (rx_all <= rx_priv + shared_std)
1305                 return false;
1306
1307         shared_buf = rx_all - rx_priv;
1308         buf_alloc->s_buf.buf_size = shared_buf;
1309         buf_alloc->s_buf.self.high = shared_buf;
1310         buf_alloc->s_buf.self.low =  2 * hdev->mps;
1311
1312         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1313                 if ((hdev->hw_tc_map & BIT(i)) &&
1314                     (hdev->tm_info.hw_pfc_map & BIT(i))) {
1315                         buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1316                         buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1317                 } else {
1318                         buf_alloc->s_buf.tc_thrd[i].low = 0;
1319                         buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1320                 }
1321         }
1322
1323         return true;
1324 }
1325
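/* Assign the default TX buffer size to every enabled TC, carving it out of
 * the total packet buffer; fails with -ENOMEM if the buffer runs out.
 */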
1326 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1327                                 struct hclge_pkt_buf_alloc *buf_alloc)
1328 {
1329         u32 i, total_size;
1330
1331         total_size = hdev->pkt_buf_size;
1332
1333         /* alloc tx buffer for all enabled tc */
1334         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1335                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1336
1337                 if (total_size < HCLGE_DEFAULT_TX_BUF)
1338                         return -ENOMEM;
1339
1340                 if (hdev->hw_tc_map & BIT(i))
1341                         priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1342                 else
1343                         priv->tx_buf_size = 0;
1344
1345                 total_size -= priv->tx_buf_size;
1346         }
1347
1348         return 0;
1349 }
1350
1351 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1352  * @hdev: pointer to struct hclge_dev
1353  * @buf_alloc: pointer to buffer calculation data
1354  * @return: 0: calculation successful, negative: fail
1355  */
1356 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1357                                 struct hclge_pkt_buf_alloc *buf_alloc)
1358 {
1359 #define HCLGE_BUF_SIZE_UNIT     128
1360         u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1361         int no_pfc_priv_num, pfc_priv_num;
1362         struct hclge_priv_buf *priv;
1363         int i;
1364
1365         aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1366         rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1367
1368         /* When DCB is not supported, rx private
1369          * buffer is not allocated.
1370          */
1371         if (!hnae3_dev_dcb_supported(hdev)) {
1372                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1373                         return -ENOMEM;
1374
1375                 return 0;
1376         }
1377
1378         /* step 1, try to alloc private buffer for all enabled tc */
1379         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1380                 priv = &buf_alloc->priv_buf[i];
1381                 if (hdev->hw_tc_map & BIT(i)) {
1382                         priv->enable = 1;
1383                         if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1384                                 priv->wl.low = aligned_mps;
1385                                 priv->wl.high = priv->wl.low + aligned_mps;
1386                                 priv->buf_size = priv->wl.high +
1387                                                 HCLGE_DEFAULT_DV;
1388                         } else {
1389                                 priv->wl.low = 0;
1390                                 priv->wl.high = 2 * aligned_mps;
1391                                 priv->buf_size = priv->wl.high;
1392                         }
1393                 } else {
1394                         priv->enable = 0;
1395                         priv->wl.low = 0;
1396                         priv->wl.high = 0;
1397                         priv->buf_size = 0;
1398                 }
1399         }
1400
1401         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1402                 return 0;
1403
1404         /* step 2, try to decrease the buffer size of
1405          * no pfc TC's private buffer
1406          */
1407         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1408                 priv = &buf_alloc->priv_buf[i];
1409
1410                 priv->enable = 0;
1411                 priv->wl.low = 0;
1412                 priv->wl.high = 0;
1413                 priv->buf_size = 0;
1414
1415                 if (!(hdev->hw_tc_map & BIT(i)))
1416                         continue;
1417
1418                 priv->enable = 1;
1419
1420                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1421                         priv->wl.low = 128;
1422                         priv->wl.high = priv->wl.low + aligned_mps;
1423                         priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1424                 } else {
1425                         priv->wl.low = 0;
1426                         priv->wl.high = aligned_mps;
1427                         priv->buf_size = priv->wl.high;
1428                 }
1429         }
1430
1431         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1432                 return 0;
1433
1434         /* step 3, try to reduce the number of pfc disabled TCs,
1435          * which have private buffer
1436          */
1437         /* get the number of TCs with pfc disabled, which have private buffer */
1438         no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1439
1440         /* let the last one be cleared first */
1441         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1442                 priv = &buf_alloc->priv_buf[i];
1443
1444                 if (hdev->hw_tc_map & BIT(i) &&
1445                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1446                         /* Clear the no pfc TC private buffer */
1447                         priv->wl.low = 0;
1448                         priv->wl.high = 0;
1449                         priv->buf_size = 0;
1450                         priv->enable = 0;
1451                         no_pfc_priv_num--;
1452                 }
1453
1454                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1455                     no_pfc_priv_num == 0)
1456                         break;
1457         }
1458
1459         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1460                 return 0;
1461
1462         /* step 4, try to reduce the number of pfc enabled TCs
1463          * which have private buffer.
1464          */
1465         pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1466
1467         /* let the last one be cleared first */
1468         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1469                 priv = &buf_alloc->priv_buf[i];
1470
1471                 if (hdev->hw_tc_map & BIT(i) &&
1472                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1473                         /* Reduce the number of pfc TC with private buffer */
1474                         priv->wl.low = 0;
1475                         priv->enable = 0;
1476                         priv->wl.high = 0;
1477                         priv->buf_size = 0;
1478                         pfc_priv_num--;
1479                 }
1480
1481                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1482                     pfc_priv_num == 0)
1483                         break;
1484         }
1485         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1486                 return 0;
1487
1488         return -ENOMEM;
1489 }
1490
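/* Editor's note -- illustrative sketch only, not part of the driver.
 * A worked example of the step-1 watermark arithmetic above, assuming a
 * typical MPS of 1500 bytes: round_up(1500, HCLGE_BUF_SIZE_UNIT) gives
 * aligned_mps = 1536, so a PFC-enabled TC gets wl.low = 1536,
 * wl.high = 3072 and buf_size = 3072 + HCLGE_DEFAULT_DV, while a
 * PFC-disabled TC gets wl.low = 0, wl.high = 3072 and buf_size = 3072.
 * Steps 2-4 then shrink or drop private buffers until
 * hclge_is_rx_buf_ok() accepts the remaining shared buffer.
 */
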
1491 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1492                                    struct hclge_pkt_buf_alloc *buf_alloc)
1493 {
1494         struct hclge_rx_priv_buff_cmd *req;
1495         struct hclge_desc desc;
1496         int ret;
1497         int i;
1498
1499         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1500         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1501
1502         /* Alloc private buffer TCs */
1503         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1504                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1505
1506                 req->buf_num[i] =
1507                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1508                 req->buf_num[i] |=
1509                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1510         }
1511
1512         req->shared_buf =
1513                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1514                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1515
1516         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1517         if (ret)
1518                 dev_err(&hdev->pdev->dev,
1519                         "rx private buffer alloc cmd failed %d\n", ret);
1520
1521         return ret;
1522 }
1523
1524 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1525                                    struct hclge_pkt_buf_alloc *buf_alloc)
1526 {
1527         struct hclge_rx_priv_wl_buf *req;
1528         struct hclge_priv_buf *priv;
1529         struct hclge_desc desc[2];
1530         int i, j;
1531         int ret;
1532
1533         for (i = 0; i < 2; i++) {
1534                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1535                                            false);
1536                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1537
1538                 /* The first descriptor sets the NEXT bit to 1 */
1539                 if (i == 0)
1540                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1541                 else
1542                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1543
1544                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1545                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1546
1547                         priv = &buf_alloc->priv_buf[idx];
1548                         req->tc_wl[j].high =
1549                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1550                         req->tc_wl[j].high |=
1551                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1552                         req->tc_wl[j].low =
1553                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1554                         req->tc_wl[j].low |=
1555                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1556                 }
1557         }
1558
1559         /* Send 2 descriptors at one time */
1560         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1561         if (ret)
1562                 dev_err(&hdev->pdev->dev,
1563                         "rx private waterline config cmd failed %d\n",
1564                         ret);
1565         return ret;
1566 }
1567
1568 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1569                                     struct hclge_pkt_buf_alloc *buf_alloc)
1570 {
1571         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1572         struct hclge_rx_com_thrd *req;
1573         struct hclge_desc desc[2];
1574         struct hclge_tc_thrd *tc;
1575         int i, j;
1576         int ret;
1577
1578         for (i = 0; i < 2; i++) {
1579                 hclge_cmd_setup_basic_desc(&desc[i],
1580                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1581                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1582
1583                 /* The first descriptor sets the NEXT bit to 1 */
1584                 if (i == 0)
1585                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1586                 else
1587                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1588
1589                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1590                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1591
1592                         req->com_thrd[j].high =
1593                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1594                         req->com_thrd[j].high |=
1595                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1596                         req->com_thrd[j].low =
1597                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1598                         req->com_thrd[j].low |=
1599                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1600                 }
1601         }
1602
1603         /* Send 2 descriptors at one time */
1604         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1605         if (ret)
1606                 dev_err(&hdev->pdev->dev,
1607                         "common threshold config cmd failed %d\n", ret);
1608         return ret;
1609 }
1610
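/* Editor's note -- illustrative sketch only, not part of the driver.
 * Both hclge_rx_priv_wl_config() and hclge_common_thrd_config() above
 * program 2 * HCLGE_TC_NUM_ONE_DESC traffic classes by chaining two
 * command descriptors: the first carries HCLGE_CMD_FLAG_NEXT so the
 * firmware treats the pair as one request. Watermarks are written in
 * hardware units of (1 << HCLGE_BUF_UNIT_S) bytes; assuming the unit
 * shift is 7 (128-byte granularity, defined elsewhere in the driver), a
 * 1536-byte watermark is encoded as 1536 >> 7 = 12 with the
 * HCLGE_RX_PRIV_EN_B bit set on top.
 */
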
1611 static int hclge_common_wl_config(struct hclge_dev *hdev,
1612                                   struct hclge_pkt_buf_alloc *buf_alloc)
1613 {
1614         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1615         struct hclge_rx_com_wl *req;
1616         struct hclge_desc desc;
1617         int ret;
1618
1619         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1620
1621         req = (struct hclge_rx_com_wl *)desc.data;
1622         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1623         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1624
1625         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1626         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1627
1628         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1629         if (ret)
1630                 dev_err(&hdev->pdev->dev,
1631                         "common waterline config cmd failed %d\n", ret);
1632
1633         return ret;
1634 }
1635
1636 int hclge_buffer_alloc(struct hclge_dev *hdev)
1637 {
1638         struct hclge_pkt_buf_alloc *pkt_buf;
1639         int ret;
1640
1641         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1642         if (!pkt_buf)
1643                 return -ENOMEM;
1644
1645         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1646         if (ret) {
1647                 dev_err(&hdev->pdev->dev,
1648                         "could not calc tx buffer size for all TCs %d\n", ret);
1649                 goto out;
1650         }
1651
1652         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1653         if (ret) {
1654                 dev_err(&hdev->pdev->dev,
1655                         "could not alloc tx buffers %d\n", ret);
1656                 goto out;
1657         }
1658
1659         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1660         if (ret) {
1661                 dev_err(&hdev->pdev->dev,
1662                         "could not calc rx priv buffer size for all TCs %d\n",
1663                         ret);
1664                 goto out;
1665         }
1666
1667         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1668         if (ret) {
1669                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1670                         ret);
1671                 goto out;
1672         }
1673
1674         if (hnae3_dev_dcb_supported(hdev)) {
1675                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1676                 if (ret) {
1677                         dev_err(&hdev->pdev->dev,
1678                                 "could not configure rx private waterline %d\n",
1679                                 ret);
1680                         goto out;
1681                 }
1682
1683                 ret = hclge_common_thrd_config(hdev, pkt_buf);
1684                 if (ret) {
1685                         dev_err(&hdev->pdev->dev,
1686                                 "could not configure common threshold %d\n",
1687                                 ret);
1688                         goto out;
1689                 }
1690         }
1691
1692         ret = hclge_common_wl_config(hdev, pkt_buf);
1693         if (ret)
1694                 dev_err(&hdev->pdev->dev,
1695                         "could not configure common waterline %d\n", ret);
1696
1697 out:
1698         kfree(pkt_buf);
1699         return ret;
1700 }
1701
1702 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1703 {
1704         struct hnae3_handle *roce = &vport->roce;
1705         struct hnae3_handle *nic = &vport->nic;
1706
1707         roce->rinfo.num_vectors = vport->back->num_roce_msi;
1708
1709         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1710             vport->back->num_msi_left == 0)
1711                 return -EINVAL;
1712
1713         roce->rinfo.base_vector = vport->back->roce_base_vector;
1714
1715         roce->rinfo.netdev = nic->kinfo.netdev;
1716         roce->rinfo.roce_io_base = vport->back->hw.io_base;
1717
1718         roce->pdev = nic->pdev;
1719         roce->ae_algo = nic->ae_algo;
1720         roce->numa_node_mask = nic->numa_node_mask;
1721
1722         return 0;
1723 }
1724
1725 static int hclge_init_msi(struct hclge_dev *hdev)
1726 {
1727         struct pci_dev *pdev = hdev->pdev;
1728         int vectors;
1729         int i;
1730
1731         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1732                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
1733         if (vectors < 0) {
1734                 dev_err(&pdev->dev,
1735                         "failed(%d) to allocate MSI/MSI-X vectors\n",
1736                         vectors);
1737                 return vectors;
1738         }
1739         if (vectors < hdev->num_msi)
1740                 dev_warn(&hdev->pdev->dev,
1741                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1742                          hdev->num_msi, vectors);
1743
1744         hdev->num_msi = vectors;
1745         hdev->num_msi_left = vectors;
1746         hdev->base_msi_vector = pdev->irq;
1747         hdev->roce_base_vector = hdev->base_msi_vector +
1748                                 hdev->roce_base_msix_offset;
1749
1750         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1751                                            sizeof(u16), GFP_KERNEL);
1752         if (!hdev->vector_status) {
1753                 pci_free_irq_vectors(pdev);
1754                 return -ENOMEM;
1755         }
1756
1757         for (i = 0; i < hdev->num_msi; i++)
1758                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1759
1760         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1761                                         sizeof(int), GFP_KERNEL);
1762         if (!hdev->vector_irq) {
1763                 pci_free_irq_vectors(pdev);
1764                 return -ENOMEM;
1765         }
1766
1767         return 0;
1768 }
1769
1770 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1771 {
1772
1773         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1774                 duplex = HCLGE_MAC_FULL;
1775
1776         return duplex;
1777 }
1778
1779 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1780                                       u8 duplex)
1781 {
1782         struct hclge_config_mac_speed_dup_cmd *req;
1783         struct hclge_desc desc;
1784         int ret;
1785
1786         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1787
1788         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1789
1790         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1791
1792         switch (speed) {
1793         case HCLGE_MAC_SPEED_10M:
1794                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1795                                 HCLGE_CFG_SPEED_S, 6);
1796                 break;
1797         case HCLGE_MAC_SPEED_100M:
1798                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1799                                 HCLGE_CFG_SPEED_S, 7);
1800                 break;
1801         case HCLGE_MAC_SPEED_1G:
1802                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1803                                 HCLGE_CFG_SPEED_S, 0);
1804                 break;
1805         case HCLGE_MAC_SPEED_10G:
1806                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1807                                 HCLGE_CFG_SPEED_S, 1);
1808                 break;
1809         case HCLGE_MAC_SPEED_25G:
1810                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1811                                 HCLGE_CFG_SPEED_S, 2);
1812                 break;
1813         case HCLGE_MAC_SPEED_40G:
1814                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1815                                 HCLGE_CFG_SPEED_S, 3);
1816                 break;
1817         case HCLGE_MAC_SPEED_50G:
1818                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1819                                 HCLGE_CFG_SPEED_S, 4);
1820                 break;
1821         case HCLGE_MAC_SPEED_100G:
1822                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1823                                 HCLGE_CFG_SPEED_S, 5);
1824                 break;
1825         default:
1826                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1827                 return -EINVAL;
1828         }
1829
1830         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1831                       1);
1832
1833         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1834         if (ret) {
1835                 dev_err(&hdev->pdev->dev,
1836                         "mac speed/duplex config cmd failed %d.\n", ret);
1837                 return ret;
1838         }
1839
1840         return 0;
1841 }
1842
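/* Editor's note -- summary of the register encoding used by
 * hclge_cfg_mac_speed_dup_hw() above, taken directly from its switch
 * statement: 1G -> 0, 10G -> 1, 25G -> 2, 40G -> 3, 50G -> 4,
 * 100G -> 5, 10M -> 6, 100M -> 7 in the HCLGE_CFG_SPEED field, with
 * HCLGE_CFG_DUPLEX_B carrying the duplex bit and
 * HCLGE_CFG_MAC_SPEED_CHANGE_EN_B always set to 1.
 */
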
1843 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1844 {
1845         int ret;
1846
1847         duplex = hclge_check_speed_dup(duplex, speed);
1848         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1849                 return 0;
1850
1851         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1852         if (ret)
1853                 return ret;
1854
1855         hdev->hw.mac.speed = speed;
1856         hdev->hw.mac.duplex = duplex;
1857
1858         return 0;
1859 }
1860
1861 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1862                                      u8 duplex)
1863 {
1864         struct hclge_vport *vport = hclge_get_vport(handle);
1865         struct hclge_dev *hdev = vport->back;
1866
1867         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1868 }
1869
1870 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
1871                                         u8 *duplex)
1872 {
1873         struct hclge_query_an_speed_dup_cmd *req;
1874         struct hclge_desc desc;
1875         int speed_tmp;
1876         int ret;
1877
1878         req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
1879
1880         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
1881         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1882         if (ret) {
1883                 dev_err(&hdev->pdev->dev,
1884                         "mac speed/autoneg/duplex query cmd failed %d\n",
1885                         ret);
1886                 return ret;
1887         }
1888
1889         *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
1890         speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
1891                                     HCLGE_QUERY_SPEED_S);
1892
1893         ret = hclge_parse_speed(speed_tmp, speed);
1894         if (ret)
1895                 dev_err(&hdev->pdev->dev,
1896                         "could not parse speed(=%d), %d\n", speed_tmp, ret);
1897
1898         return ret;
1899 }
1900
1901 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
1902 {
1903         struct hclge_config_auto_neg_cmd *req;
1904         struct hclge_desc desc;
1905         u32 flag = 0;
1906         int ret;
1907
1908         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
1909
1910         req = (struct hclge_config_auto_neg_cmd *)desc.data;
1911         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
1912         req->cfg_an_cmd_flag = cpu_to_le32(flag);
1913
1914         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1915         if (ret)
1916                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
1917                         ret);
1918
1919         return ret;
1920 }
1921
1922 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
1923 {
1924         struct hclge_vport *vport = hclge_get_vport(handle);
1925         struct hclge_dev *hdev = vport->back;
1926
1927         return hclge_set_autoneg_en(hdev, enable);
1928 }
1929
1930 static int hclge_get_autoneg(struct hnae3_handle *handle)
1931 {
1932         struct hclge_vport *vport = hclge_get_vport(handle);
1933         struct hclge_dev *hdev = vport->back;
1934         struct phy_device *phydev = hdev->hw.mac.phydev;
1935
1936         if (phydev)
1937                 return phydev->autoneg;
1938
1939         return hdev->hw.mac.autoneg;
1940 }
1941
1942 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
1943                                            bool mask_vlan,
1944                                            u8 *mac_mask)
1945 {
1946         struct hclge_mac_vlan_mask_entry_cmd *req;
1947         struct hclge_desc desc;
1948         int status;
1949
1950         req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
1951         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
1952
1953         hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
1954                       mask_vlan ? 1 : 0);
1955         ether_addr_copy(req->mac_mask, mac_mask);
1956
1957         status = hclge_cmd_send(&hdev->hw, &desc, 1);
1958         if (status)
1959                 dev_err(&hdev->pdev->dev,
1960                         "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
1961                         status);
1962
1963         return status;
1964 }
1965
1966 static int hclge_mac_init(struct hclge_dev *hdev)
1967 {
1968         struct hnae3_handle *handle = &hdev->vport[0].nic;
1969         struct net_device *netdev = handle->kinfo.netdev;
1970         struct hclge_mac *mac = &hdev->hw.mac;
1971         u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1972         struct hclge_vport *vport;
1973         int mtu;
1974         int ret;
1975         int i;
1976
1977         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
1978         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
1979                                          hdev->hw.mac.duplex);
1980         if (ret) {
1981                 dev_err(&hdev->pdev->dev,
1982                         "Config mac speed dup fail ret=%d\n", ret);
1983                 return ret;
1984         }
1985
1986         mac->link = 0;
1987
1988         /* Initialize the MTA table work mode */
1989         hdev->enable_mta        = true;
1990         hdev->mta_mac_sel_type  = HCLGE_MAC_ADDR_47_36;
1991
1992         ret = hclge_set_mta_filter_mode(hdev,
1993                                         hdev->mta_mac_sel_type,
1994                                         hdev->enable_mta);
1995         if (ret) {
1996                 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
1997                         ret);
1998                 return ret;
1999         }
2000
2001         for (i = 0; i < hdev->num_alloc_vport; i++) {
2002                 vport = &hdev->vport[i];
2003                 vport->accept_mta_mc = false;
2004
2005                 memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
2006                 ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
2007                 if (ret) {
2008                         dev_err(&hdev->pdev->dev,
2009                                 "set mta filter mode fail ret=%d\n", ret);
2010                         return ret;
2011                 }
2012         }
2013
2014         ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2015         if (ret) {
2016                 dev_err(&hdev->pdev->dev,
2017                         "set default mac_vlan_mask fail ret=%d\n", ret);
2018                 return ret;
2019         }
2020
2021         if (netdev)
2022                 mtu = netdev->mtu;
2023         else
2024                 mtu = ETH_DATA_LEN;
2025
2026         ret = hclge_set_mtu(handle, mtu);
2027         if (ret)
2028                 dev_err(&hdev->pdev->dev,
2029                         "set mtu failed ret=%d\n", ret);
2030
2031         return ret;
2032 }
2033
2034 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2035 {
2036         if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2037                 schedule_work(&hdev->mbx_service_task);
2038 }
2039
2040 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2041 {
2042         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2043                 schedule_work(&hdev->rst_service_task);
2044 }
2045
2046 static void hclge_task_schedule(struct hclge_dev *hdev)
2047 {
2048         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2049             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2050             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2051                 (void)schedule_work(&hdev->service_task);
2052 }
2053
2054 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2055 {
2056         struct hclge_link_status_cmd *req;
2057         struct hclge_desc desc;
2058         int link_status;
2059         int ret;
2060
2061         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2062         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2063         if (ret) {
2064                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2065                         ret);
2066                 return ret;
2067         }
2068
2069         req = (struct hclge_link_status_cmd *)desc.data;
2070         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2071
2072         return !!link_status;
2073 }
2074
2075 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2076 {
2077         int mac_state;
2078         int link_stat;
2079
2080         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2081                 return 0;
2082
2083         mac_state = hclge_get_mac_link_status(hdev);
2084
2085         if (hdev->hw.mac.phydev) {
2086                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2087                         link_stat = mac_state &
2088                                 hdev->hw.mac.phydev->link;
2089                 else
2090                         link_stat = 0;
2091
2092         } else {
2093                 link_stat = mac_state;
2094         }
2095
2096         return !!link_stat;
2097 }
2098
2099 static void hclge_update_link_status(struct hclge_dev *hdev)
2100 {
2101         struct hnae3_client *client = hdev->nic_client;
2102         struct hnae3_handle *handle;
2103         int state;
2104         int i;
2105
2106         if (!client)
2107                 return;
2108         state = hclge_get_mac_phy_link(hdev);
2109         if (state != hdev->hw.mac.link) {
2110                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2111                         handle = &hdev->vport[i].nic;
2112                         client->ops->link_status_change(handle, state);
2113                 }
2114                 hdev->hw.mac.link = state;
2115         }
2116 }
2117
2118 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2119 {
2120         struct hclge_mac mac = hdev->hw.mac;
2121         u8 duplex;
2122         int speed;
2123         int ret;
2124
2125         /* get the speed and duplex as autoneg's result from the mac cmd
2126          * when the phy doesn't exist.
2127          */
2128         if (mac.phydev || !mac.autoneg)
2129                 return 0;
2130
2131         ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2132         if (ret) {
2133                 dev_err(&hdev->pdev->dev,
2134                         "mac autoneg/speed/duplex query failed %d\n", ret);
2135                 return ret;
2136         }
2137
2138         ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2139         if (ret) {
2140                 dev_err(&hdev->pdev->dev,
2141                         "mac speed/duplex config failed %d\n", ret);
2142                 return ret;
2143         }
2144
2145         return 0;
2146 }
2147
2148 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2149 {
2150         struct hclge_vport *vport = hclge_get_vport(handle);
2151         struct hclge_dev *hdev = vport->back;
2152
2153         return hclge_update_speed_duplex(hdev);
2154 }
2155
2156 static int hclge_get_status(struct hnae3_handle *handle)
2157 {
2158         struct hclge_vport *vport = hclge_get_vport(handle);
2159         struct hclge_dev *hdev = vport->back;
2160
2161         hclge_update_link_status(hdev);
2162
2163         return hdev->hw.mac.link;
2164 }
2165
2166 static void hclge_service_timer(struct timer_list *t)
2167 {
2168         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2169
2170         mod_timer(&hdev->service_timer, jiffies + HZ);
2171         hdev->hw_stats.stats_timer++;
2172         hclge_task_schedule(hdev);
2173 }
2174
2175 static void hclge_service_complete(struct hclge_dev *hdev)
2176 {
2177         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2178
2179         /* Flush memory before next watchdog */
2180         smp_mb__before_atomic();
2181         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2182 }
2183
2184 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2185 {
2186         u32 rst_src_reg;
2187         u32 cmdq_src_reg;
2188
2189         /* fetch the events from their corresponding regs */
2190         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2191         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2192
2193         /* Assumption: If by any chance reset and mailbox events are reported
2194          * together then we will only process reset event in this go and will
2195          * defer the processing of the mailbox events. Since we would not
2196          * have cleared the RX CMDQ event this time, we will receive another
2197          * interrupt from H/W just for the mailbox.
2198          */
2199
2200         /* check for vector0 reset event sources */
2201         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2202                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2203                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2204                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2205                 return HCLGE_VECTOR0_EVENT_RST;
2206         }
2207
2208         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2209                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2210                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2211                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2212                 return HCLGE_VECTOR0_EVENT_RST;
2213         }
2214
2215         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2216                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2217                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2218                 return HCLGE_VECTOR0_EVENT_RST;
2219         }
2220
2221         /* check for vector0 mailbox(=CMDQ RX) event source */
2222         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2223                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2224                 *clearval = cmdq_src_reg;
2225                 return HCLGE_VECTOR0_EVENT_MBX;
2226         }
2227
2228         return HCLGE_VECTOR0_EVENT_OTHER;
2229 }
2230
2231 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2232                                     u32 regclr)
2233 {
2234         switch (event_type) {
2235         case HCLGE_VECTOR0_EVENT_RST:
2236                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2237                 break;
2238         case HCLGE_VECTOR0_EVENT_MBX:
2239                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2240                 break;
2241         default:
2242                 break;
2243         }
2244 }
2245
2246 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2247 {
2248         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2249                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2250                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2251                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2252         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2253 }
2254
2255 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2256 {
2257         writel(enable ? 1 : 0, vector->addr);
2258 }
2259
2260 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2261 {
2262         struct hclge_dev *hdev = data;
2263         u32 event_cause;
2264         u32 clearval;
2265
2266         hclge_enable_vector(&hdev->misc_vector, false);
2267         event_cause = hclge_check_event_cause(hdev, &clearval);
2268
2269         /* vector 0 interrupt is shared with reset and mailbox source events. */
2270         switch (event_cause) {
2271         case HCLGE_VECTOR0_EVENT_RST:
2272                 hclge_reset_task_schedule(hdev);
2273                 break;
2274         case HCLGE_VECTOR0_EVENT_MBX:
2275                 /* If we are here then,
2276                  * 1. Either we are not handling any mbx task and we are not
2277                  *    scheduled as well
2278                  *                        OR
2279                  * 2. We could be handling a mbx task but nothing more is
2280                  *    scheduled.
2281                  * In both cases, we should schedule mbx task as there are more
2282                  * mbx messages reported by this interrupt.
2283                  */
2284                 hclge_mbx_task_schedule(hdev);
2285                 break;
2286         default:
2287                 dev_warn(&hdev->pdev->dev,
2288                          "received unknown or unhandled event of vector0\n");
2289                 break;
2290         }
2291
2292         /* clear the source of interrupt if it is not caused by reset */
2293         if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
2294                 hclge_clear_event_cause(hdev, event_cause, clearval);
2295                 hclge_enable_vector(&hdev->misc_vector, true);
2296         }
2297
2298         return IRQ_HANDLED;
2299 }
2300
2301 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2302 {
2303         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2304                 dev_warn(&hdev->pdev->dev,
2305                          "vector(vector_id %d) has been freed.\n", vector_id);
2306                 return;
2307         }
2308
2309         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2310         hdev->num_msi_left += 1;
2311         hdev->num_msi_used -= 1;
2312 }
2313
2314 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2315 {
2316         struct hclge_misc_vector *vector = &hdev->misc_vector;
2317
2318         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2319
2320         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2321         hdev->vector_status[0] = 0;
2322
2323         hdev->num_msi_left -= 1;
2324         hdev->num_msi_used += 1;
2325 }
2326
2327 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2328 {
2329         int ret;
2330
2331         hclge_get_misc_vector(hdev);
2332
2333         /* this would be explicitly freed in the end */
2334         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2335                           0, "hclge_misc", hdev);
2336         if (ret) {
2337                 hclge_free_vector(hdev, 0);
2338                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2339                         hdev->misc_vector.vector_irq);
2340         }
2341
2342         return ret;
2343 }
2344
2345 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2346 {
2347         free_irq(hdev->misc_vector.vector_irq, hdev);
2348         hclge_free_vector(hdev, 0);
2349 }
2350
2351 static int hclge_notify_client(struct hclge_dev *hdev,
2352                                enum hnae3_reset_notify_type type)
2353 {
2354         struct hnae3_client *client = hdev->nic_client;
2355         u16 i;
2356
2357         if (!client->ops->reset_notify)
2358                 return -EOPNOTSUPP;
2359
2360         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2361                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2362                 int ret;
2363
2364                 ret = client->ops->reset_notify(handle, type);
2365                 if (ret)
2366                         return ret;
2367         }
2368
2369         return 0;
2370 }
2371
2372 static int hclge_reset_wait(struct hclge_dev *hdev)
2373 {
2374 #define HCLGE_RESET_WATI_MS     100
2375 #define HCLGE_RESET_WAIT_CNT    5
2376         u32 val, reg, reg_bit;
2377         u32 cnt = 0;
2378
2379         switch (hdev->reset_type) {
2380         case HNAE3_GLOBAL_RESET:
2381                 reg = HCLGE_GLOBAL_RESET_REG;
2382                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2383                 break;
2384         case HNAE3_CORE_RESET:
2385                 reg = HCLGE_GLOBAL_RESET_REG;
2386                 reg_bit = HCLGE_CORE_RESET_BIT;
2387                 break;
2388         case HNAE3_FUNC_RESET:
2389                 reg = HCLGE_FUN_RST_ING;
2390                 reg_bit = HCLGE_FUN_RST_ING_B;
2391                 break;
2392         default:
2393                 dev_err(&hdev->pdev->dev,
2394                         "Wait for unsupported reset type: %d\n",
2395                         hdev->reset_type);
2396                 return -EINVAL;
2397         }
2398
2399         val = hclge_read_dev(&hdev->hw, reg);
2400         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2401                 msleep(HCLGE_RESET_WATI_MS);
2402                 val = hclge_read_dev(&hdev->hw, reg);
2403                 cnt++;
2404         }
2405
2406         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2407                 dev_warn(&hdev->pdev->dev,
2408                          "Wait for reset timeout: %d\n", hdev->reset_type);
2409                 return -EBUSY;
2410         }
2411
2412         return 0;
2413 }
2414
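/* Editor's note -- illustrative timing only. With the values defined in
 * hclge_reset_wait() above, the driver polls the reset status register
 * at most HCLGE_RESET_WAIT_CNT (5) times, sleeping HCLGE_RESET_WATI_MS
 * (100 ms) between polls, so a reset that has not completed within
 * roughly 5 * 100 ms = 500 ms is reported as -EBUSY.
 */
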
2415 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2416 {
2417         struct hclge_desc desc;
2418         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2419         int ret;
2420
2421         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2422         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2423         req->fun_reset_vfid = func_id;
2424
2425         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2426         if (ret)
2427                 dev_err(&hdev->pdev->dev,
2428                         "send function reset cmd fail, status =%d\n", ret);
2429
2430         return ret;
2431 }
2432
2433 static void hclge_do_reset(struct hclge_dev *hdev)
2434 {
2435         struct pci_dev *pdev = hdev->pdev;
2436         u32 val;
2437
2438         switch (hdev->reset_type) {
2439         case HNAE3_GLOBAL_RESET:
2440                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2441                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2442                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2443                 dev_info(&pdev->dev, "Global Reset requested\n");
2444                 break;
2445         case HNAE3_CORE_RESET:
2446                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2447                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2448                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2449                 dev_info(&pdev->dev, "Core Reset requested\n");
2450                 break;
2451         case HNAE3_FUNC_RESET:
2452                 dev_info(&pdev->dev, "PF Reset requested\n");
2453                 hclge_func_reset_cmd(hdev, 0);
2454                 /* schedule again to check later */
2455                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2456                 hclge_reset_task_schedule(hdev);
2457                 break;
2458         default:
2459                 dev_warn(&pdev->dev,
2460                          "Unsupported reset type: %d\n", hdev->reset_type);
2461                 break;
2462         }
2463 }
2464
2465 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2466                                                    unsigned long *addr)
2467 {
2468         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2469
2470         /* return the highest priority reset level amongst all */
2471         if (test_bit(HNAE3_GLOBAL_RESET, addr))
2472                 rst_level = HNAE3_GLOBAL_RESET;
2473         else if (test_bit(HNAE3_CORE_RESET, addr))
2474                 rst_level = HNAE3_CORE_RESET;
2475         else if (test_bit(HNAE3_IMP_RESET, addr))
2476                 rst_level = HNAE3_IMP_RESET;
2477         else if (test_bit(HNAE3_FUNC_RESET, addr))
2478                 rst_level = HNAE3_FUNC_RESET;
2479
2480         /* now, clear all other resets */
2481         clear_bit(HNAE3_GLOBAL_RESET, addr);
2482         clear_bit(HNAE3_CORE_RESET, addr);
2483         clear_bit(HNAE3_IMP_RESET, addr);
2484         clear_bit(HNAE3_FUNC_RESET, addr);
2485
2486         return rst_level;
2487 }
2488
2489 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2490 {
2491         u32 clearval = 0;
2492
2493         switch (hdev->reset_type) {
2494         case HNAE3_IMP_RESET:
2495                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2496                 break;
2497         case HNAE3_GLOBAL_RESET:
2498                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2499                 break;
2500         case HNAE3_CORE_RESET:
2501                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2502                 break;
2503         default:
2504                 break;
2505         }
2506
2507         if (!clearval)
2508                 return;
2509
2510         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2511         hclge_enable_vector(&hdev->misc_vector, true);
2512 }
2513
2514 static void hclge_reset(struct hclge_dev *hdev)
2515 {
2516         struct hnae3_handle *handle;
2517
2518         /* perform reset of the stack & ae device for a client */
2519         handle = &hdev->vport[0].nic;
2520         rtnl_lock();
2521         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2522
2523         if (!hclge_reset_wait(hdev)) {
2524                 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2525                 hclge_reset_ae_dev(hdev->ae_dev);
2526                 hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2527
2528                 hclge_clear_reset_cause(hdev);
2529         } else {
2530                 /* schedule again to check pending resets later */
2531                 set_bit(hdev->reset_type, &hdev->reset_pending);
2532                 hclge_reset_task_schedule(hdev);
2533         }
2534
2535         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2536         handle->last_reset_time = jiffies;
2537         rtnl_unlock();
2538 }
2539
2540 static void hclge_reset_event(struct hnae3_handle *handle)
2541 {
2542         struct hclge_vport *vport = hclge_get_vport(handle);
2543         struct hclge_dev *hdev = vport->back;
2544
2545         /* check if this is a new reset request, and that we are not here just
2546          * because the last reset attempt did not succeed and the watchdog hit
2547          * us again. We know it is a new request if the last one did not occur
2548          * very recently (watchdog timer = 5*HZ, so check after a sufficiently
2549          * long time, say 4*5*HZ). For a new request we drop the "reset level"
2550          * back to PF reset. If it is a repeat of the most recent request, we
2551          * want to throttle it, so we do not allow it again before 3*HZ has
2552          * elapsed.
2553          */
2554         if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
2555                 return;
2556         else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
2557                 handle->reset_level = HNAE3_FUNC_RESET;
2558
2559         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
2560                  handle->reset_level);
2561
2562         /* request reset & schedule reset task */
2563         set_bit(handle->reset_level, &hdev->reset_request);
2564         hclge_reset_task_schedule(hdev);
2565
2566         if (handle->reset_level < HNAE3_GLOBAL_RESET)
2567                 handle->reset_level++;
2568 }
2569
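/* Editor's note -- illustrative timing only. With HZ ticks per second,
 * hclge_reset_event() above ignores a request arriving within 3 * HZ
 * (about 3 s) of the previous one, and treats a request arriving more
 * than 4 * 5 * HZ (about 20 s) after the previous one as brand new,
 * dropping the escalation level back to HNAE3_FUNC_RESET; otherwise the
 * level escalates one step per request, up to HNAE3_GLOBAL_RESET.
 */
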
2570 static void hclge_reset_subtask(struct hclge_dev *hdev)
2571 {
2572         /* check if there is any ongoing reset in the hardware. This status can
2573          * be checked from reset_pending. If there is, then we need to wait for
2574          * hardware to complete reset.
2575          *    a. If we are able to figure out in reasonable time that hardware
2576          *       has fully reset, then we can proceed with the driver and
2577          *       client reset.
2578          *    b. else, we can come back later to check this status, so
2579          *       re-schedule now.
2580          */
2581         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2582         if (hdev->reset_type != HNAE3_NONE_RESET)
2583                 hclge_reset(hdev);
2584
2585         /* check if we got any *new* reset requests to be honored */
2586         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2587         if (hdev->reset_type != HNAE3_NONE_RESET)
2588                 hclge_do_reset(hdev);
2589
2590         hdev->reset_type = HNAE3_NONE_RESET;
2591 }
2592
2593 static void hclge_reset_service_task(struct work_struct *work)
2594 {
2595         struct hclge_dev *hdev =
2596                 container_of(work, struct hclge_dev, rst_service_task);
2597
2598         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2599                 return;
2600
2601         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2602
2603         hclge_reset_subtask(hdev);
2604
2605         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2606 }
2607
2608 static void hclge_mailbox_service_task(struct work_struct *work)
2609 {
2610         struct hclge_dev *hdev =
2611                 container_of(work, struct hclge_dev, mbx_service_task);
2612
2613         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2614                 return;
2615
2616         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2617
2618         hclge_mbx_handler(hdev);
2619
2620         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2621 }
2622
2623 static void hclge_service_task(struct work_struct *work)
2624 {
2625         struct hclge_dev *hdev =
2626                 container_of(work, struct hclge_dev, service_task);
2627
2628         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2629                 hclge_update_stats_for_all(hdev);
2630                 hdev->hw_stats.stats_timer = 0;
2631         }
2632
2633         hclge_update_speed_duplex(hdev);
2634         hclge_update_link_status(hdev);
2635         hclge_service_complete(hdev);
2636 }
2637
2638 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2639 {
2640         /* VF handle has no client */
2641         if (!handle->client)
2642                 return container_of(handle, struct hclge_vport, nic);
2643         else if (handle->client->type == HNAE3_CLIENT_ROCE)
2644                 return container_of(handle, struct hclge_vport, roce);
2645         else
2646                 return container_of(handle, struct hclge_vport, nic);
2647 }
2648
2649 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2650                             struct hnae3_vector_info *vector_info)
2651 {
2652         struct hclge_vport *vport = hclge_get_vport(handle);
2653         struct hnae3_vector_info *vector = vector_info;
2654         struct hclge_dev *hdev = vport->back;
2655         int alloc = 0;
2656         int i, j;
2657
2658         vector_num = min(hdev->num_msi_left, vector_num);
2659
2660         for (j = 0; j < vector_num; j++) {
2661                 for (i = 1; i < hdev->num_msi; i++) {
2662                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2663                                 vector->vector = pci_irq_vector(hdev->pdev, i);
2664                                 vector->io_addr = hdev->hw.io_base +
2665                                         HCLGE_VECTOR_REG_BASE +
2666                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2667                                         vport->vport_id *
2668                                         HCLGE_VECTOR_VF_OFFSET;
2669                                 hdev->vector_status[i] = vport->vport_id;
2670                                 hdev->vector_irq[i] = vector->vector;
2671
2672                                 vector++;
2673                                 alloc++;
2674
2675                                 break;
2676                         }
2677                 }
2678         }
2679         hdev->num_msi_left -= alloc;
2680         hdev->num_msi_used += alloc;
2681
2682         return alloc;
2683 }
2684
2685 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2686 {
2687         int i;
2688
2689         for (i = 0; i < hdev->num_msi; i++)
2690                 if (vector == hdev->vector_irq[i])
2691                         return i;
2692
2693         return -EINVAL;
2694 }
2695
2696 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
2697 {
2698         struct hclge_vport *vport = hclge_get_vport(handle);
2699         struct hclge_dev *hdev = vport->back;
2700         int vector_id;
2701
2702         vector_id = hclge_get_vector_index(hdev, vector);
2703         if (vector_id < 0) {
2704                 dev_err(&hdev->pdev->dev,
2705                         "Get vector index fail. vector_id =%d\n", vector_id);
2706                 return vector_id;
2707         }
2708
2709         hclge_free_vector(hdev, vector_id);
2710
2711         return 0;
2712 }
2713
2714 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2715 {
2716         return HCLGE_RSS_KEY_SIZE;
2717 }
2718
2719 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2720 {
2721         return HCLGE_RSS_IND_TBL_SIZE;
2722 }
2723
2724 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2725                                   const u8 hfunc, const u8 *key)
2726 {
2727         struct hclge_rss_config_cmd *req;
2728         struct hclge_desc desc;
2729         int key_offset;
2730         int key_size;
2731         int ret;
2732
2733         req = (struct hclge_rss_config_cmd *)desc.data;
2734
2735         for (key_offset = 0; key_offset < 3; key_offset++) {
2736                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2737                                            false);
2738
2739                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2740                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2741
2742                 if (key_offset == 2)
2743                         key_size =
2744                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2745                 else
2746                         key_size = HCLGE_RSS_HASH_KEY_NUM;
2747
2748                 memcpy(req->hash_key,
2749                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2750
2751                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2752                 if (ret) {
2753                         dev_err(&hdev->pdev->dev,
2754                                 "Configure RSS config fail, status = %d\n",
2755                                 ret);
2756                         return ret;
2757                 }
2758         }
2759         return 0;
2760 }
2761
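/* Editor's note -- illustrative sketch only, not part of the driver.
 * hclge_set_rss_algo_key() above writes the hash key in three chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, one generic-config descriptor per
 * chunk, with key_offset selecting the destination window. Assuming the
 * usual values of a 40-byte key and 16-byte chunks (both defined
 * elsewhere in the driver), the last descriptor carries only
 * 40 - 2 * 16 = 8 bytes.
 */
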
2762 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
2763 {
2764         struct hclge_rss_indirection_table_cmd *req;
2765         struct hclge_desc desc;
2766         int i, j;
2767         int ret;
2768
2769         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
2770
2771         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2772                 hclge_cmd_setup_basic_desc
2773                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2774
2775                 req->start_table_index =
2776                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2777                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
2778
2779                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2780                         req->rss_result[j] =
2781                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2782
2783                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2784                 if (ret) {
2785                         dev_err(&hdev->pdev->dev,
2786                                 "Configure rss indir table fail, status = %d\n",
2787                                 ret);
2788                         return ret;
2789                 }
2790         }
2791         return 0;
2792 }
2793
2794 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2795                                  u16 *tc_size, u16 *tc_offset)
2796 {
2797         struct hclge_rss_tc_mode_cmd *req;
2798         struct hclge_desc desc;
2799         int ret;
2800         int i;
2801
2802         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2803         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
2804
2805         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2806                 u16 mode = 0;
2807
2808                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2809                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
2810                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2811                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
2812                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2813
2814                 req->rss_tc_mode[i] = cpu_to_le16(mode);
2815         }
2816
2817         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2818         if (ret)
2819                 dev_err(&hdev->pdev->dev,
2820                         "Configure rss tc mode fail, status = %d\n", ret);
2821
2822         return ret;
2823 }
2824
2825 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2826 {
2827         struct hclge_rss_input_tuple_cmd *req;
2828         struct hclge_desc desc;
2829         int ret;
2830
2831         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2832
2833         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2834
2835         /* Get the tuple cfg from pf */
2836         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
2837         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
2838         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
2839         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
2840         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
2841         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
2842         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
2843         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
2844         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2845         if (ret)
2846                 dev_err(&hdev->pdev->dev,
2847                         "Configure rss input fail, status = %d\n", ret);
2848         return ret;
2849 }
2850
2851 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2852                          u8 *key, u8 *hfunc)
2853 {
2854         struct hclge_vport *vport = hclge_get_vport(handle);
2855         int i;
2856
2857         /* Get hash algorithm */
2858         if (hfunc)
2859                 *hfunc = vport->rss_algo;
2860
2861         /* Get the RSS Key required by the user */
2862         if (key)
2863                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2864
2865         /* Get indirect table */
2866         if (indir)
2867                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2868                         indir[i] = vport->rss_indirection_tbl[i];
2869
2870         return 0;
2871 }
2872
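     /* Apply a new RSS configuration. Only the Toeplitz hash
      * (ETH_RSS_HASH_TOP) is accepted; a new key is programmed via
      * hclge_set_rss_algo_key(), both key and indirection table are
      * cached in the vport, and the indirection table is then written
      * to hardware.
      */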
2873 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2874                          const u8 *key, const u8 hfunc)
2875 {
2876         struct hclge_vport *vport = hclge_get_vport(handle);
2877         struct hclge_dev *hdev = vport->back;
2878         u8 hash_algo;
2879         int ret, i;
2880
2881         /* Set the RSS Hash Key if specified by the user */
2882         if (key) {
2883
2884                 if (hfunc == ETH_RSS_HASH_TOP ||
2885                     hfunc == ETH_RSS_HASH_NO_CHANGE)
2886                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2887                 else
2888                         return -EINVAL;
2889                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2890                 if (ret)
2891                         return ret;
2892
2893                 /* Update the shadow RSS key with the user specified key */
2894                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2895                 vport->rss_algo = hash_algo;
2896         }
2897
2898         /* Update the shadow RSS table with user specified qids */
2899         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2900                 vport->rss_indirection_tbl[i] = indir[i];
2901
2902         /* Update the hardware */
2903         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
2904 }
2905
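     /* Translate the ethtool RXH_* flags in nfc->data into the driver's
      * HCLGE_*_BIT tuple bits. For example RXH_IP_SRC | RXH_IP_DST |
      * RXH_L4_B_0_1 | RXH_L4_B_2_3 becomes HCLGE_S_IP_BIT | HCLGE_D_IP_BIT |
      * HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT; SCTP flows additionally get
      * HCLGE_V_TAG_BIT.
      */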
2906 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
2907 {
2908         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
2909
2910         if (nfc->data & RXH_L4_B_2_3)
2911                 hash_sets |= HCLGE_D_PORT_BIT;
2912         else
2913                 hash_sets &= ~HCLGE_D_PORT_BIT;
2914
2915         if (nfc->data & RXH_IP_SRC)
2916                 hash_sets |= HCLGE_S_IP_BIT;
2917         else
2918                 hash_sets &= ~HCLGE_S_IP_BIT;
2919
2920         if (nfc->data & RXH_IP_DST)
2921                 hash_sets |= HCLGE_D_IP_BIT;
2922         else
2923                 hash_sets &= ~HCLGE_D_IP_BIT;
2924
2925         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
2926                 hash_sets |= HCLGE_V_TAG_BIT;
2927
2928         return hash_sets;
2929 }
2930
2931 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
2932                                struct ethtool_rxnfc *nfc)
2933 {
2934         struct hclge_vport *vport = hclge_get_vport(handle);
2935         struct hclge_dev *hdev = vport->back;
2936         struct hclge_rss_input_tuple_cmd *req;
2937         struct hclge_desc desc;
2938         u8 tuple_sets;
2939         int ret;
2940
2941         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2942                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
2943                 return -EINVAL;
2944
2945         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2946         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2947
2948         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
2949         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
2950         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
2951         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
2952         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
2953         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
2954         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
2955         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
2956
2957         tuple_sets = hclge_get_rss_hash_bits(nfc);
2958         switch (nfc->flow_type) {
2959         case TCP_V4_FLOW:
2960                 req->ipv4_tcp_en = tuple_sets;
2961                 break;
2962         case TCP_V6_FLOW:
2963                 req->ipv6_tcp_en = tuple_sets;
2964                 break;
2965         case UDP_V4_FLOW:
2966                 req->ipv4_udp_en = tuple_sets;
2967                 break;
2968         case UDP_V6_FLOW:
2969                 req->ipv6_udp_en = tuple_sets;
2970                 break;
2971         case SCTP_V4_FLOW:
2972                 req->ipv4_sctp_en = tuple_sets;
2973                 break;
2974         case SCTP_V6_FLOW:
2975                 if ((nfc->data & RXH_L4_B_0_1) ||
2976                     (nfc->data & RXH_L4_B_2_3))
2977                         return -EINVAL;
2978
2979                 req->ipv6_sctp_en = tuple_sets;
2980                 break;
2981         case IPV4_FLOW:
2982                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2983                 break;
2984         case IPV6_FLOW:
2985                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2986                 break;
2987         default:
2988                 return -EINVAL;
2989         }
2990
2991         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2992         if (ret) {
2993                 dev_err(&hdev->pdev->dev,
2994                         "Set rss tuple fail, status = %d\n", ret);
2995                 return ret;
2996         }
2997
2998         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
2999         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3000         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3001         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3002         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3003         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3004         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3005         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3006         return 0;
3007 }
3008
3009 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3010                                struct ethtool_rxnfc *nfc)
3011 {
3012         struct hclge_vport *vport = hclge_get_vport(handle);
3013         u8 tuple_sets;
3014
3015         nfc->data = 0;
3016
3017         switch (nfc->flow_type) {
3018         case TCP_V4_FLOW:
3019                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3020                 break;
3021         case UDP_V4_FLOW:
3022                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3023                 break;
3024         case TCP_V6_FLOW:
3025                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3026                 break;
3027         case UDP_V6_FLOW:
3028                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3029                 break;
3030         case SCTP_V4_FLOW:
3031                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3032                 break;
3033         case SCTP_V6_FLOW:
3034                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3035                 break;
3036         case IPV4_FLOW:
3037         case IPV6_FLOW:
3038                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3039                 break;
3040         default:
3041                 return -EINVAL;
3042         }
3043
3044         if (!tuple_sets)
3045                 return 0;
3046
3047         if (tuple_sets & HCLGE_D_PORT_BIT)
3048                 nfc->data |= RXH_L4_B_2_3;
3049         if (tuple_sets & HCLGE_S_PORT_BIT)
3050                 nfc->data |= RXH_L4_B_0_1;
3051         if (tuple_sets & HCLGE_D_IP_BIT)
3052                 nfc->data |= RXH_IP_DST;
3053         if (tuple_sets & HCLGE_S_IP_BIT)
3054                 nfc->data |= RXH_IP_SRC;
3055
3056         return 0;
3057 }
3058
3059 static int hclge_get_tc_size(struct hnae3_handle *handle)
3060 {
3061         struct hclge_vport *vport = hclge_get_vport(handle);
3062         struct hclge_dev *hdev = vport->back;
3063
3064         return hdev->rss_size_max;
3065 }
3066
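     /* Program the RSS settings cached in the PF vport (indirection
      * table, hash key/algorithm and input tuples) into hardware, then
      * configure the per-TC RSS mode from hw_tc_map and the rounded-up
      * RSS queue size.
      */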
3067 int hclge_rss_init_hw(struct hclge_dev *hdev)
3068 {
3069         struct hclge_vport *vport = hdev->vport;
3070         u8 *rss_indir = vport[0].rss_indirection_tbl;
3071         u16 rss_size = vport[0].alloc_rss_size;
3072         u8 *key = vport[0].rss_hash_key;
3073         u8 hfunc = vport[0].rss_algo;
3074         u16 tc_offset[HCLGE_MAX_TC_NUM];
3075         u16 tc_valid[HCLGE_MAX_TC_NUM];
3076         u16 tc_size[HCLGE_MAX_TC_NUM];
3077         u16 roundup_size;
3078         int i, ret;
3079
3080         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3081         if (ret)
3082                 return ret;
3083
3084         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3085         if (ret)
3086                 return ret;
3087
3088         ret = hclge_set_rss_input_tuple(hdev);
3089         if (ret)
3090                 return ret;
3091
3092         /* Each TC has the same queue size, and the tc_size set to hardware is
3093          * the log2 of the roundup power of two of rss_size; the actual queue
3094          * size is limited by the indirection table.
3095          */
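             /* e.g. rss_size = 5: roundup_pow_of_two(5) = 8, so the
              * tc_size written to hardware is ilog2(8) = 3.
              */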
3096         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3097                 dev_err(&hdev->pdev->dev,
3098                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3099                         rss_size);
3100                 return -EINVAL;
3101         }
3102
3103         roundup_size = roundup_pow_of_two(rss_size);
3104         roundup_size = ilog2(roundup_size);
3105
3106         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3107                 tc_valid[i] = 0;
3108
3109                 if (!(hdev->hw_tc_map & BIT(i)))
3110                         continue;
3111
3112                 tc_valid[i] = 1;
3113                 tc_size[i] = roundup_size;
3114                 tc_offset[i] = rss_size * i;
3115         }
3116
3117         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3118 }
3119
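     /* Fill each vport's RSS indirection table with a simple round-robin
      * spread over its allocated RSS queues: entry i maps to queue
      * i % alloc_rss_size.
      */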
3120 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3121 {
3122         struct hclge_vport *vport = hdev->vport;
3123         int i, j;
3124
3125         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3126                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3127                         vport[j].rss_indirection_tbl[i] =
3128                                 i % vport[j].alloc_rss_size;
3129         }
3130 }
3131
3132 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3133 {
3134         struct hclge_vport *vport = hdev->vport;
3135         int i;
3136
3137         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3138                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3139                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3140                 vport[i].rss_tuple_sets.ipv4_udp_en =
3141                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3142                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3143                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3144                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3145                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3146                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3147                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3148                 vport[i].rss_tuple_sets.ipv6_udp_en =
3149                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3150                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3151                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3152                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3153                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3154
3155                 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3156
3157                 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3158         }
3159
3160         hclge_rss_indir_init_cfg(hdev);
3161 }
3162
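     /* Map (en = true) or unmap (en = false) a chain of TQP rings
      * to/from an interrupt vector. Ring entries are packed into the
      * command descriptor; whenever HCLGE_VECTOR_ELEMENTS_PER_CMD
      * entries have been filled the command is sent and a fresh
      * descriptor is started, and any remaining entries are flushed
      * after the loop.
      */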
3163 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3164                                 int vector_id, bool en,
3165                                 struct hnae3_ring_chain_node *ring_chain)
3166 {
3167         struct hclge_dev *hdev = vport->back;
3168         struct hnae3_ring_chain_node *node;
3169         struct hclge_desc desc;
3170         struct hclge_ctrl_vector_chain_cmd *req
3171                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3172         enum hclge_cmd_status status;
3173         enum hclge_opcode_type op;
3174         u16 tqp_type_and_id;
3175         int i;
3176
3177         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3178         hclge_cmd_setup_basic_desc(&desc, op, false);
3179         req->int_vector_id = vector_id;
3180
3181         i = 0;
3182         for (node = ring_chain; node; node = node->next) {
3183                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3184                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3185                                 HCLGE_INT_TYPE_S,
3186                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3187                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3188                                 HCLGE_TQP_ID_S, node->tqp_index);
3189                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3190                                 HCLGE_INT_GL_IDX_S,
3191                                 hnae3_get_field(node->int_gl_idx,
3192                                                 HNAE3_RING_GL_IDX_M,
3193                                                 HNAE3_RING_GL_IDX_S));
3194                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3195                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3196                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3197                         req->vfid = vport->vport_id;
3198
3199                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
3200                         if (status) {
3201                                 dev_err(&hdev->pdev->dev,
3202                                         "Map TQP fail, status is %d.\n",
3203                                         status);
3204                                 return -EIO;
3205                         }
3206                         i = 0;
3207
3208                         hclge_cmd_setup_basic_desc(&desc,
3209                                                    op,
3210                                                    false);
3211                         req->int_vector_id = vector_id;
3212                 }
3213         }
3214
3215         if (i > 0) {
3216                 req->int_cause_num = i;
3217                 req->vfid = vport->vport_id;
3218                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3219                 if (status) {
3220                         dev_err(&hdev->pdev->dev,
3221                                 "Map TQP fail, status is %d.\n", status);
3222                         return -EIO;
3223                 }
3224         }
3225
3226         return 0;
3227 }
3228
3229 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3230                                     int vector,
3231                                     struct hnae3_ring_chain_node *ring_chain)
3232 {
3233         struct hclge_vport *vport = hclge_get_vport(handle);
3234         struct hclge_dev *hdev = vport->back;
3235         int vector_id;
3236
3237         vector_id = hclge_get_vector_index(hdev, vector);
3238         if (vector_id < 0) {
3239                 dev_err(&hdev->pdev->dev,
3240                         "Get vector index fail. vector_id = %d\n", vector_id);
3241                 return vector_id;
3242         }
3243
3244         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3245 }
3246
3247 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3248                                        int vector,
3249                                        struct hnae3_ring_chain_node *ring_chain)
3250 {
3251         struct hclge_vport *vport = hclge_get_vport(handle);
3252         struct hclge_dev *hdev = vport->back;
3253         int vector_id, ret;
3254
3255         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3256                 return 0;
3257
3258         vector_id = hclge_get_vector_index(hdev, vector);
3259         if (vector_id < 0) {
3260                 dev_err(&handle->pdev->dev,
3261                         "Get vector index fail. ret = %d\n", vector_id);
3262                 return vector_id;
3263         }
3264
3265         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3266         if (ret)
3267                 dev_err(&handle->pdev->dev,
3268                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
3269                         vector_id,
3270                         ret);
3271
3272         return ret;
3273 }
3274
3275 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3276                                struct hclge_promisc_param *param)
3277 {
3278         struct hclge_promisc_cfg_cmd *req;
3279         struct hclge_desc desc;
3280         int ret;
3281
3282         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3283
3284         req = (struct hclge_promisc_cfg_cmd *)desc.data;
3285         req->vf_id = param->vf_id;
3286
3287         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3288          * pdev revision(0x20); newer revisions support them. Setting
3289          * these two fields does not cause an error when the driver
3290          * sends the command to the firmware on revision(0x20).
3291          */
3292         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3293                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3294
3295         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3296         if (ret)
3297                 dev_err(&hdev->pdev->dev,
3298                         "Set promisc mode fail, status is %d.\n", ret);
3299
3300         return ret;
3301 }
3302
3303 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3304                               bool en_mc, bool en_bc, int vport_id)
3305 {
3306         if (!param)
3307                 return;
3308
3309         memset(param, 0, sizeof(struct hclge_promisc_param));
3310         if (en_uc)
3311                 param->enable = HCLGE_PROMISC_EN_UC;
3312         if (en_mc)
3313                 param->enable |= HCLGE_PROMISC_EN_MC;
3314         if (en_bc)
3315                 param->enable |= HCLGE_PROMISC_EN_BC;
3316         param->vf_id = vport_id;
3317 }
3318
3319 static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3320                                    bool en_mc_pmc)
3321 {
3322         struct hclge_vport *vport = hclge_get_vport(handle);
3323         struct hclge_dev *hdev = vport->back;
3324         struct hclge_promisc_param param;
3325
3326         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
3327                                  vport->vport_id);
3328         hclge_cmd_set_promisc_mode(hdev, &param);
3329 }
3330
3331 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3332 {
3333         struct hclge_get_fd_mode_cmd *req;
3334         struct hclge_desc desc;
3335         int ret;
3336
3337         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3338
3339         req = (struct hclge_get_fd_mode_cmd *)desc.data;
3340
3341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3342         if (ret) {
3343                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3344                 return ret;
3345         }
3346
3347         *fd_mode = req->mode;
3348
3349         return ret;
3350 }
3351
3352 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3353                                    u32 *stage1_entry_num,
3354                                    u32 *stage2_entry_num,
3355                                    u16 *stage1_counter_num,
3356                                    u16 *stage2_counter_num)
3357 {
3358         struct hclge_get_fd_allocation_cmd *req;
3359         struct hclge_desc desc;
3360         int ret;
3361
3362         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3363
3364         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3365
3366         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3367         if (ret) {
3368                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3369                         ret);
3370                 return ret;
3371         }
3372
3373         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3374         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3375         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3376         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3377
3378         return ret;
3379 }
3380
3381 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3382 {
3383         struct hclge_set_fd_key_config_cmd *req;
3384         struct hclge_fd_key_cfg *stage;
3385         struct hclge_desc desc;
3386         int ret;
3387
3388         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3389
3390         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3391         stage = &hdev->fd_cfg.key_cfg[stage_num];
3392         req->stage = stage_num;
3393         req->key_select = stage->key_sel;
3394         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3395         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3396         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3397         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3398         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3399         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3400
3401         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3402         if (ret)
3403                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3404
3405         return ret;
3406 }
3407
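     /* Set up the flow director: query the FD mode and entry/counter
      * allocation from firmware, build the stage 1 key configuration
      * (active tuples plus ROCE_TYPE/DST_VPORT meta data) and write it
      * back with hclge_set_fd_key_config().
      */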
3408 static int hclge_init_fd_config(struct hclge_dev *hdev)
3409 {
3410 #define LOW_2_WORDS             0x03
3411         struct hclge_fd_key_cfg *key_cfg;
3412         int ret;
3413
3414         if (!hnae3_dev_fd_supported(hdev))
3415                 return 0;
3416
3417         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3418         if (ret)
3419                 return ret;
3420
3421         switch (hdev->fd_cfg.fd_mode) {
3422         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3423                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3424                 break;
3425         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3426                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3427                 break;
3428         default:
3429                 dev_err(&hdev->pdev->dev,
3430                         "Unsupported flow director mode %d\n",
3431                         hdev->fd_cfg.fd_mode);
3432                 return -EOPNOTSUPP;
3433         }
3434
3435         hdev->fd_cfg.fd_en = true;
3436         hdev->fd_cfg.proto_support =
3437                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3438                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3439         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3440         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3441         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3442         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3443         key_cfg->outer_sipv6_word_en = 0;
3444         key_cfg->outer_dipv6_word_en = 0;
3445
3446         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3447                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3448                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3449                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3450
3451         /* If we use the max 400bit key, we can support tuples for ether type */
3452         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3453                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3454                 key_cfg->tuple_active |=
3455                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3456         }
3457
3458         /* roce_type is used to filter roce frames
3459          * dst_vport is used to specify the rule
3460          */
3461         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3462
3463         ret = hclge_get_fd_allocation(hdev,
3464                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3465                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3466                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3467                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3468         if (ret)
3469                 return ret;
3470
3471         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3472 }
3473
3474 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3475                                 int loc, u8 *key, bool is_add)
3476 {
3477         struct hclge_fd_tcam_config_1_cmd *req1;
3478         struct hclge_fd_tcam_config_2_cmd *req2;
3479         struct hclge_fd_tcam_config_3_cmd *req3;
3480         struct hclge_desc desc[3];
3481         int ret;
3482
3483         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3484         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3485         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3486         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3487         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3488
3489         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3490         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3491         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3492
3493         req1->stage = stage;
3494         req1->xy_sel = sel_x ? 1 : 0;
3495         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3496         req1->index = cpu_to_le32(loc);
3497         req1->entry_vld = sel_x ? is_add : 0;
3498
3499         if (key) {
3500                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3501                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3502                        sizeof(req2->tcam_data));
3503                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3504                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3505         }
3506
3507         ret = hclge_cmd_send(&hdev->hw, desc, 3);
3508         if (ret)
3509                 dev_err(&hdev->pdev->dev,
3510                         "config tcam key fail, ret=%d\n",
3511                         ret);
3512
3513         return ret;
3514 }
3515
3516 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3517                               struct hclge_fd_ad_data *action)
3518 {
3519         struct hclge_fd_ad_config_cmd *req;
3520         struct hclge_desc desc;
3521         u64 ad_data = 0;
3522         int ret;
3523
3524         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3525
3526         req = (struct hclge_fd_ad_config_cmd *)desc.data;
3527         req->index = cpu_to_le32(loc);
3528         req->stage = stage;
3529
3530         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3531                       action->write_rule_id_to_bd);
3532         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3533                         action->rule_id);
3534         ad_data <<= 32;
3535         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3536         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3537                       action->forward_to_direct_queue);
3538         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3539                         action->queue_id);
3540         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3541         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3542                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3543         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3544         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3545                         action->next_input_key);
3546
3547         req->ad_data = cpu_to_le64(ad_data);
3548         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3549         if (ret)
3550                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3551
3552         return ret;
3553 }
3554
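     /* Derive the TCAM x/y key bytes for one tuple from its value/mask
      * pair using calc_x()/calc_y(). Returns true when the tuple
      * occupies key space (so the caller advances its key pointers),
      * false otherwise.
      */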
3555 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3556                                    struct hclge_fd_rule *rule)
3557 {
3558         u16 tmp_x_s, tmp_y_s;
3559         u32 tmp_x_l, tmp_y_l;
3560         int i;
3561
3562         if (rule->unused_tuple & tuple_bit)
3563                 return true;
3564
3565         switch (tuple_bit) {
3566         case 0:
3567                 return false;
3568         case BIT(INNER_DST_MAC):
3569                 for (i = 0; i < 6; i++) {
3570                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3571                                rule->tuples_mask.dst_mac[i]);
3572                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3573                                rule->tuples_mask.dst_mac[i]);
3574                 }
3575
3576                 return true;
3577         case BIT(INNER_SRC_MAC):
3578                 for (i = 0; i < 6; i++) {
3579                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3580                                rule->tuples_mask.src_mac[i]);
3581                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3582                                rule->tuples_mask.src_mac[i]);
3583                 }
3584
3585                 return true;
3586         case BIT(INNER_VLAN_TAG_FST):
3587                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3588                        rule->tuples_mask.vlan_tag1);
3589                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3590                        rule->tuples_mask.vlan_tag1);
3591                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3592                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3593
3594                 return true;
3595         case BIT(INNER_ETH_TYPE):
3596                 calc_x(tmp_x_s, rule->tuples.ether_proto,
3597                        rule->tuples_mask.ether_proto);
3598                 calc_y(tmp_y_s, rule->tuples.ether_proto,
3599                        rule->tuples_mask.ether_proto);
3600                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3601                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3602
3603                 return true;
3604         case BIT(INNER_IP_TOS):
3605                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3606                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3607
3608                 return true;
3609         case BIT(INNER_IP_PROTO):
3610                 calc_x(*key_x, rule->tuples.ip_proto,
3611                        rule->tuples_mask.ip_proto);
3612                 calc_y(*key_y, rule->tuples.ip_proto,
3613                        rule->tuples_mask.ip_proto);
3614
3615                 return true;
3616         case BIT(INNER_SRC_IP):
3617                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
3618                        rule->tuples_mask.src_ip[3]);
3619                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
3620                        rule->tuples_mask.src_ip[3]);
3621                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3622                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3623
3624                 return true;
3625         case BIT(INNER_DST_IP):
3626                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
3627                        rule->tuples_mask.dst_ip[3]);
3628                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
3629                        rule->tuples_mask.dst_ip[3]);
3630                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3631                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3632
3633                 return true;
3634         case BIT(INNER_SRC_PORT):
3635                 calc_x(tmp_x_s, rule->tuples.src_port,
3636                        rule->tuples_mask.src_port);
3637                 calc_y(tmp_y_s, rule->tuples.src_port,
3638                        rule->tuples_mask.src_port);
3639                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3640                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3641
3642                 return true;
3643         case BIT(INNER_DST_PORT):
3644                 calc_x(tmp_x_s, rule->tuples.dst_port,
3645                        rule->tuples_mask.dst_port);
3646                 calc_y(tmp_y_s, rule->tuples.dst_port,
3647                        rule->tuples_mask.dst_port);
3648                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3649                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3650
3651                 return true;
3652         default:
3653                 return false;
3654         }
3655 }
3656
3657 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
3658                                  u8 vf_id, u8 network_port_id)
3659 {
3660         u32 port_number = 0;
3661
3662         if (port_type == HOST_PORT) {
3663                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
3664                                 pf_id);
3665                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
3666                                 vf_id);
3667                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
3668         } else {
3669                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
3670                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
3671                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
3672         }
3673
3674         return port_number;
3675 }
3676
3677 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
3678                                        __le32 *key_x, __le32 *key_y,
3679                                        struct hclge_fd_rule *rule)
3680 {
3681         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
3682         u8 cur_pos = 0, tuple_size, shift_bits;
3683         int i;
3684
3685         for (i = 0; i < MAX_META_DATA; i++) {
3686                 tuple_size = meta_data_key_info[i].key_length;
3687                 tuple_bit = key_cfg->meta_data_active & BIT(i);
3688
3689                 switch (tuple_bit) {
3690                 case BIT(ROCE_TYPE):
3691                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
3692                         cur_pos += tuple_size;
3693                         break;
3694                 case BIT(DST_VPORT):
3695                         port_number = hclge_get_port_number(HOST_PORT, 0,
3696                                                             rule->vf_id, 0);
3697                         hnae3_set_field(meta_data,
3698                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
3699                                         cur_pos, port_number);
3700                         cur_pos += tuple_size;
3701                         break;
3702                 default:
3703                         break;
3704                 }
3705         }
3706
3707         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
3708         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
3709         shift_bits = sizeof(meta_data) * 8 - cur_pos;
3710
3711         *key_x = cpu_to_le32(tmp_x << shift_bits);
3712         *key_y = cpu_to_le32(tmp_y << shift_bits);
3713 }
3714
3715 /* A complete key is combined with meta data key and tuple key.
3716  * Meta data key is stored at the MSB region, and tuple key is stored at
3717  * the LSB region, unused bits will be filled 0.
3718  */
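     /* A rough sketch of the resulting key layout:
      *
      *   MSB                                                      LSB
      *   | meta data | zero padding | last tuple | ... | first tuple |
      */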
3719 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
3720                             struct hclge_fd_rule *rule)
3721 {
3722         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
3723         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
3724         u8 *cur_key_x, *cur_key_y;
3725         int i, ret, tuple_size;
3726         u8 meta_data_region;
3727
3728         memset(key_x, 0, sizeof(key_x));
3729         memset(key_y, 0, sizeof(key_y));
3730         cur_key_x = key_x;
3731         cur_key_y = key_y;
3732
3733         for (i = 0; i < MAX_TUPLE; i++) {
3734                 bool tuple_valid;
3735                 u32 check_tuple;
3736
3737                 tuple_size = tuple_key_info[i].key_length / 8;
3738                 check_tuple = key_cfg->tuple_active & BIT(i);
3739
3740                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
3741                                                      cur_key_y, rule);
3742                 if (tuple_valid) {
3743                         cur_key_x += tuple_size;
3744                         cur_key_y += tuple_size;
3745                 }
3746         }
3747
3748         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
3749                         MAX_META_DATA_LENGTH / 8;
3750
3751         hclge_fd_convert_meta_data(key_cfg,
3752                                    (__le32 *)(key_x + meta_data_region),
3753                                    (__le32 *)(key_y + meta_data_region),
3754                                    rule);
3755
3756         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
3757                                    true);
3758         if (ret) {
3759                 dev_err(&hdev->pdev->dev,
3760                         "fd key_y config fail, loc=%d, ret=%d\n",
3761                         rule->location, ret);
3762                 return ret;
3763         }
3764
3765         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
3766                                    true);
3767         if (ret)
3768                 dev_err(&hdev->pdev->dev,
3769                         "fd key_x config fail, loc=%d, ret=%d\n",
3770                         rule->location, ret);
3771         return ret;
3772 }
3773
3774 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
3775                                struct hclge_fd_rule *rule)
3776 {
3777         struct hclge_fd_ad_data ad_data;
3778
3779         ad_data.ad_id = rule->location;
3780
3781         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
3782                 ad_data.drop_packet = true;
3783                 ad_data.forward_to_direct_queue = false;
3784                 ad_data.queue_id = 0;
3785         } else {
3786                 ad_data.drop_packet = false;
3787                 ad_data.forward_to_direct_queue = true;
3788                 ad_data.queue_id = rule->queue_id;
3789         }
3790
3791         ad_data.use_counter = false;
3792         ad_data.counter_id = 0;
3793
3794         ad_data.use_next_stage = false;
3795         ad_data.next_input_key = 0;
3796
3797         ad_data.write_rule_id_to_bd = true;
3798         ad_data.rule_id = rule->location;
3799
3800         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
3801 }
3802
3803 static int hclge_fd_check_spec(struct hclge_dev *hdev,
3804                                struct ethtool_rx_flow_spec *fs, u32 *unused)
3805 {
3806         struct ethtool_tcpip4_spec *tcp_ip4_spec;
3807         struct ethtool_usrip4_spec *usr_ip4_spec;
3808         struct ethtool_tcpip6_spec *tcp_ip6_spec;
3809         struct ethtool_usrip6_spec *usr_ip6_spec;
3810         struct ethhdr *ether_spec;
3811
3812         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
3813                 return -EINVAL;
3814
3815         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
3816                 return -EOPNOTSUPP;
3817
3818         if ((fs->flow_type & FLOW_EXT) &&
3819             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
3820                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
3821                 return -EOPNOTSUPP;
3822         }
3823
3824         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
3825         case SCTP_V4_FLOW:
3826         case TCP_V4_FLOW:
3827         case UDP_V4_FLOW:
3828                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
3829                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
3830
3831                 if (!tcp_ip4_spec->ip4src)
3832                         *unused |= BIT(INNER_SRC_IP);
3833
3834                 if (!tcp_ip4_spec->ip4dst)
3835                         *unused |= BIT(INNER_DST_IP);
3836
3837                 if (!tcp_ip4_spec->psrc)
3838                         *unused |= BIT(INNER_SRC_PORT);
3839
3840                 if (!tcp_ip4_spec->pdst)
3841                         *unused |= BIT(INNER_DST_PORT);
3842
3843                 if (!tcp_ip4_spec->tos)
3844                         *unused |= BIT(INNER_IP_TOS);
3845
3846                 break;
3847         case IP_USER_FLOW:
3848                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
3849                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
3850                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3851
3852                 if (!usr_ip4_spec->ip4src)
3853                         *unused |= BIT(INNER_SRC_IP);
3854
3855                 if (!usr_ip4_spec->ip4dst)
3856                         *unused |= BIT(INNER_DST_IP);
3857
3858                 if (!usr_ip4_spec->tos)
3859                         *unused |= BIT(INNER_IP_TOS);
3860
3861                 if (!usr_ip4_spec->proto)
3862                         *unused |= BIT(INNER_IP_PROTO);
3863
3864                 if (usr_ip4_spec->l4_4_bytes)
3865                         return -EOPNOTSUPP;
3866
3867                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
3868                         return -EOPNOTSUPP;
3869
3870                 break;
3871         case SCTP_V6_FLOW:
3872         case TCP_V6_FLOW:
3873         case UDP_V6_FLOW:
3874                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
3875                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
3876                         BIT(INNER_IP_TOS);
3877
3878                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
3879                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
3880                         *unused |= BIT(INNER_SRC_IP);
3881
3882                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
3883                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
3884                         *unused |= BIT(INNER_DST_IP);
3885
3886                 if (!tcp_ip6_spec->psrc)
3887                         *unused |= BIT(INNER_SRC_PORT);
3888
3889                 if (!tcp_ip6_spec->pdst)
3890                         *unused |= BIT(INNER_DST_PORT);
3891
3892                 if (tcp_ip6_spec->tclass)
3893                         return -EOPNOTSUPP;
3894
3895                 break;
3896         case IPV6_USER_FLOW:
3897                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
3898                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
3899                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
3900                         BIT(INNER_DST_PORT);
3901
3902                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
3903                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
3904                         *unused |= BIT(INNER_SRC_IP);
3905
3906                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
3907                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
3908                         *unused |= BIT(INNER_DST_IP);
3909
3910                 if (!usr_ip6_spec->l4_proto)
3911                         *unused |= BIT(INNER_IP_PROTO);
3912
3913                 if (usr_ip6_spec->tclass)
3914                         return -EOPNOTSUPP;
3915
3916                 if (usr_ip6_spec->l4_4_bytes)
3917                         return -EOPNOTSUPP;
3918
3919                 break;
3920         case ETHER_FLOW:
3921                 ether_spec = &fs->h_u.ether_spec;
3922                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3923                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
3924                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
3925
3926                 if (is_zero_ether_addr(ether_spec->h_source))
3927                         *unused |= BIT(INNER_SRC_MAC);
3928
3929                 if (is_zero_ether_addr(ether_spec->h_dest))
3930                         *unused |= BIT(INNER_DST_MAC);
3931
3932                 if (!ether_spec->h_proto)
3933                         *unused |= BIT(INNER_ETH_TYPE);
3934
3935                 break;
3936         default:
3937                 return -EOPNOTSUPP;
3938         }
3939
3940         if ((fs->flow_type & FLOW_EXT)) {
3941                 if (fs->h_ext.vlan_etype)
3942                         return -EOPNOTSUPP;
3943                 if (!fs->h_ext.vlan_tci)
3944                         *unused |= BIT(INNER_VLAN_TAG_FST);
3945
3946                 if (fs->m_ext.vlan_tci) {
3947                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
3948                                 return -EINVAL;
3949                 }
3950         } else {
3951                 *unused |= BIT(INNER_VLAN_TAG_FST);
3952         }
3953
3954         if (fs->flow_type & FLOW_MAC_EXT) {
3955                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
3956                         return -EOPNOTSUPP;
3957
3958                 if (is_zero_ether_addr(fs->h_ext.h_dest))
3959                         *unused |= BIT(INNER_DST_MAC);
3960                 else
3961                         *unused &= ~(BIT(INNER_DST_MAC));
3962         }
3963
3964         return 0;
3965 }
3966
3967 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
3968 {
3969         struct hclge_fd_rule *rule = NULL;
3970         struct hlist_node *node2;
3971
3972         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
3973                 if (rule->location >= location)
3974                         break;
3975         }
3976
3977         return rule && rule->location == location;
3978 }
3979
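     /* Maintain the location-sorted FD rule list: any existing rule at
      * @location is removed first; when @is_add is true the new rule is
      * then linked in behind its predecessor (or at the list head).
      */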
3980 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
3981                                      struct hclge_fd_rule *new_rule,
3982                                      u16 location,
3983                                      bool is_add)
3984 {
3985         struct hclge_fd_rule *rule = NULL, *parent = NULL;
3986         struct hlist_node *node2;
3987
3988         if (is_add && !new_rule)
3989                 return -EINVAL;
3990
3991         hlist_for_each_entry_safe(rule, node2,
3992                                   &hdev->fd_rule_list, rule_node) {
3993                 if (rule->location >= location)
3994                         break;
3995                 parent = rule;
3996         }
3997
3998         if (rule && rule->location == location) {
3999                 hlist_del(&rule->rule_node);
4000                 kfree(rule);
4001                 hdev->hclge_fd_rule_num--;
4002
4003                 if (!is_add)
4004                         return 0;
4005
4006         } else if (!is_add) {
4007                 dev_err(&hdev->pdev->dev,
4008                         "delete fail, rule %d does not exist\n",
4009                         location);
4010                 return -EINVAL;
4011         }
4012
4013         INIT_HLIST_NODE(&new_rule->rule_node);
4014
4015         if (parent)
4016                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4017         else
4018                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4019
4020         hdev->hclge_fd_rule_num++;
4021
4022         return 0;
4023 }
4024
4025 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4026                               struct ethtool_rx_flow_spec *fs,
4027                               struct hclge_fd_rule *rule)
4028 {
4029         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4030
4031         switch (flow_type) {
4032         case SCTP_V4_FLOW:
4033         case TCP_V4_FLOW:
4034         case UDP_V4_FLOW:
4035                 rule->tuples.src_ip[3] =
4036                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4037                 rule->tuples_mask.src_ip[3] =
4038                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4039
4040                 rule->tuples.dst_ip[3] =
4041                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4042                 rule->tuples_mask.dst_ip[3] =
4043                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4044
4045                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4046                 rule->tuples_mask.src_port =
4047                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4048
4049                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4050                 rule->tuples_mask.dst_port =
4051                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4052
4053                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4054                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4055
4056                 rule->tuples.ether_proto = ETH_P_IP;
4057                 rule->tuples_mask.ether_proto = 0xFFFF;
4058
4059                 break;
4060         case IP_USER_FLOW:
4061                 rule->tuples.src_ip[3] =
4062                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4063                 rule->tuples_mask.src_ip[3] =
4064                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4065
4066                 rule->tuples.dst_ip[3] =
4067                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4068                 rule->tuples_mask.dst_ip[3] =
4069                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4070
4071                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4072                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4073
4074                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4075                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4076
4077                 rule->tuples.ether_proto = ETH_P_IP;
4078                 rule->tuples_mask.ether_proto = 0xFFFF;
4079
4080                 break;
4081         case SCTP_V6_FLOW:
4082         case TCP_V6_FLOW:
4083         case UDP_V6_FLOW:
4084                 be32_to_cpu_array(rule->tuples.src_ip,
4085                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4086                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4087                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4088
4089                 be32_to_cpu_array(rule->tuples.dst_ip,
4090                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4091                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4092                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4093
4094                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4095                 rule->tuples_mask.src_port =
4096                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4097
4098                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4099                 rule->tuples_mask.dst_port =
4100                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4101
4102                 rule->tuples.ether_proto = ETH_P_IPV6;
4103                 rule->tuples_mask.ether_proto = 0xFFFF;
4104
4105                 break;
4106         case IPV6_USER_FLOW:
4107                 be32_to_cpu_array(rule->tuples.src_ip,
4108                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4109                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4110                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4111
4112                 be32_to_cpu_array(rule->tuples.dst_ip,
4113                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4114                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4115                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4116
4117                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4118                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4119
4120                 rule->tuples.ether_proto = ETH_P_IPV6;
4121                 rule->tuples_mask.ether_proto = 0xFFFF;
4122
4123                 break;
4124         case ETHER_FLOW:
4125                 ether_addr_copy(rule->tuples.src_mac,
4126                                 fs->h_u.ether_spec.h_source);
4127                 ether_addr_copy(rule->tuples_mask.src_mac,
4128                                 fs->m_u.ether_spec.h_source);
4129
4130                 ether_addr_copy(rule->tuples.dst_mac,
4131                                 fs->h_u.ether_spec.h_dest);
4132                 ether_addr_copy(rule->tuples_mask.dst_mac,
4133                                 fs->m_u.ether_spec.h_dest);
4134
4135                 rule->tuples.ether_proto =
4136                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4137                 rule->tuples_mask.ether_proto =
4138                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4139
4140                 break;
4141         default:
4142                 return -EOPNOTSUPP;
4143         }
4144
4145         switch (flow_type) {
4146         case SCTP_V4_FLOW:
4147         case SCTP_V6_FLOW:
4148                 rule->tuples.ip_proto = IPPROTO_SCTP;
4149                 rule->tuples_mask.ip_proto = 0xFF;
4150                 break;
4151         case TCP_V4_FLOW:
4152         case TCP_V6_FLOW:
4153                 rule->tuples.ip_proto = IPPROTO_TCP;
4154                 rule->tuples_mask.ip_proto = 0xFF;
4155                 break;
4156         case UDP_V4_FLOW:
4157         case UDP_V6_FLOW:
4158                 rule->tuples.ip_proto = IPPROTO_UDP;
4159                 rule->tuples_mask.ip_proto = 0xFF;
4160                 break;
4161         default:
4162                 break;
4163         }
4164
4165         if ((fs->flow_type & FLOW_EXT)) {
4166                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4167                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4168         }
4169
4170         if (fs->flow_type & FLOW_MAC_EXT) {
4171                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4172                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4173         }
4174
4175         return 0;
4176 }
4177
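     /* Add a flow director rule from an ethtool flow spec: validate the
      * spec, resolve the drop action or destination vport/queue, extract
      * the rule tuples, program the stage 1 action and TCAM key, and
      * finally insert the rule into the driver's rule list.
      */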
4178 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4179                               struct ethtool_rxnfc *cmd)
4180 {
4181         struct hclge_vport *vport = hclge_get_vport(handle);
4182         struct hclge_dev *hdev = vport->back;
4183         u16 dst_vport_id = 0, q_index = 0;
4184         struct ethtool_rx_flow_spec *fs;
4185         struct hclge_fd_rule *rule;
4186         u32 unused = 0;
4187         u8 action;
4188         int ret;
4189
4190         if (!hnae3_dev_fd_supported(hdev))
4191                 return -EOPNOTSUPP;
4192
4193         if (!hdev->fd_cfg.fd_en) {
4194                 dev_warn(&hdev->pdev->dev,
4195                          "Please enable flow director first\n");
4196                 return -EOPNOTSUPP;
4197         }
4198
4199         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4200
4201         ret = hclge_fd_check_spec(hdev, fs, &unused);
4202         if (ret) {
4203                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4204                 return ret;
4205         }
4206
4207         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4208                 action = HCLGE_FD_ACTION_DROP_PACKET;
4209         } else {
4210                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4211                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4212                 u16 tqps;
4213
4214                 if (vf > hdev->num_req_vfs) {
4215                         dev_err(&hdev->pdev->dev,
4216                                 "Error: vf id (%d) > max vf num (%d)\n",
4217                                 vf, hdev->num_req_vfs);
4218                         return -EINVAL;
4219                 }
4220
4221                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4222                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4223
4224                 if (ring >= tqps) {
4225                         dev_err(&hdev->pdev->dev,
4226                                 "Error: queue id (%d) > max queue id (%d)\n",
4227                                 ring, tqps - 1);
4228                         return -EINVAL;
4229                 }
4230
4231                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4232                 q_index = ring;
4233         }
4234
4235         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4236         if (!rule)
4237                 return -ENOMEM;
4238
4239         ret = hclge_fd_get_tuple(hdev, fs, rule);
4240         if (ret)
4241                 goto free_rule;
4242
4243         rule->flow_type = fs->flow_type;
4244
4245         rule->location = fs->location;
4246         rule->unused_tuple = unused;
4247         rule->vf_id = dst_vport_id;
4248         rule->queue_id = q_index;
4249         rule->action = action;
4250
4251         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4252         if (ret)
4253                 goto free_rule;
4254
4255         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4256         if (ret)
4257                 goto free_rule;
4258
4259         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4260         if (ret)
4261                 goto free_rule;
4262
4263         return ret;
4264
4265 free_rule:
4266         kfree(rule);
4267         return ret;
4268 }
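/* For illustration only: a rule typically reaches hclge_add_fd_entry() from
 * user space through the ETHTOOL_SRXCLSRLINS ioctl, e.g. something like
 * "ethtool -N <dev> flow-type tcp4 dst-port 80 action 3 loc 1" (device name
 * and values hypothetical). An action of -1 is encoded as RX_CLS_FLOW_DISC
 * and becomes HCLGE_FD_ACTION_DROP_PACKET above; a non-negative action
 * selects the destination queue (q_index), optionally with a VF id encoded
 * in the upper bits of ring_cookie.
 */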
4269
4270 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4271                               struct ethtool_rxnfc *cmd)
4272 {
4273         struct hclge_vport *vport = hclge_get_vport(handle);
4274         struct hclge_dev *hdev = vport->back;
4275         struct ethtool_rx_flow_spec *fs;
4276         int ret;
4277
4278         if (!hnae3_dev_fd_supported(hdev))
4279                 return -EOPNOTSUPP;
4280
4281         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4282
4283         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4284                 return -EINVAL;
4285
4286         if (!hclge_fd_rule_exist(hdev, fs->location)) {
4287                 dev_err(&hdev->pdev->dev,
4288                         "Delete fail, rule %d does not exist\n",
4289                         fs->location);
4290                 return -ENOENT;
4291         }
4292
4293         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4294                                    fs->location, NULL, false);
4295         if (ret)
4296                 return ret;
4297
4298         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4299                                          false);
4300 }
4301
4302 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
4303 {
4304         struct hclge_desc desc;
4305         struct hclge_config_mac_mode_cmd *req =
4306                 (struct hclge_config_mac_mode_cmd *)desc.data;
4307         u32 loop_en = 0;
4308         int ret;
4309
4310         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
4311         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
4312         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
4313         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
4314         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
4315         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
4316         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
4317         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
4318         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
4319         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
4320         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
4321         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
4322         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
4323         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
4324         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
4325         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
4326
4327         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4328         if (ret)
4329                 dev_err(&hdev->pdev->dev,
4330                         "mac enable fail, ret =%d.\n", ret);
4331 }
4332
4333 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
4334 {
4335         struct hclge_config_mac_mode_cmd *req;
4336         struct hclge_desc desc;
4337         u32 loop_en;
4338         int ret;
4339
4340         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
4341         /* 1 Read out the current MAC mode config first */
4342         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
4343         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4344         if (ret) {
4345                 dev_err(&hdev->pdev->dev,
4346                         "mac loopback get fail, ret =%d.\n", ret);
4347                 return ret;
4348         }
4349
4350         /* 2 Then setup the loopback flag */
4351         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
4352         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
4353         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
4354         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
4355
4356         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
4357
4358         /* 3 Config mac work mode with loopback flag
4359          * and its original configuration parameters
4360          */
4361         hclge_cmd_reuse_desc(&desc, false);
4362         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4363         if (ret)
4364                 dev_err(&hdev->pdev->dev,
4365                         "mac loopback set fail, ret =%d.\n", ret);
4366         return ret;
4367 }
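/* Note: the sequence above is a read-modify-write of the MAC mode register:
 * the current config is read back, only the APP loopback and TX/RX enable
 * bits are toggled, and the same descriptor is resent via
 * hclge_cmd_reuse_desc() so all other fields keep their original values.
 */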
4368
4369 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
4370                                      enum hnae3_loop loop_mode)
4371 {
4372 #define HCLGE_SERDES_RETRY_MS   10
4373 #define HCLGE_SERDES_RETRY_NUM  100
4374         struct hclge_serdes_lb_cmd *req;
4375         struct hclge_desc desc;
4376         int ret, i = 0;
4377         u8 loop_mode_b;
4378
4379         req = (struct hclge_serdes_lb_cmd *)desc.data;
4380         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
4381
4382         switch (loop_mode) {
4383         case HNAE3_LOOP_SERIAL_SERDES:
4384                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
4385                 break;
4386         case HNAE3_LOOP_PARALLEL_SERDES:
4387                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
4388                 break;
4389         default:
4390                 dev_err(&hdev->pdev->dev,
4391                         "unsupported serdes loopback mode %d\n", loop_mode);
4392                 return -ENOTSUPP;
4393         }
4394
4395         if (en) {
4396                 req->enable = loop_mode_b;
4397                 req->mask = loop_mode_b;
4398         } else {
4399                 req->mask = loop_mode_b;
4400         }
4401
4402         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4403         if (ret) {
4404                 dev_err(&hdev->pdev->dev,
4405                         "serdes loopback set fail, ret = %d\n", ret);
4406                 return ret;
4407         }
4408
4409         do {
4410                 msleep(HCLGE_SERDES_RETRY_MS);
4411                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
4412                                            true);
4413                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4414                 if (ret) {
4415                         dev_err(&hdev->pdev->dev,
4416                                 "serdes loopback get fail, ret = %d\n");
4417                         return ret;
4418                 }
4419         } while (++i < HCLGE_SERDES_RETRY_NUM &&
4420                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
4421
4422         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
4423                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
4424                 return -EBUSY;
4425         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
4426                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
4427                 return -EIO;
4428         }
4429
4430         hclge_cfg_mac_mode(hdev, en);
4431         return 0;
4432 }
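/* Completion of the serdes loopback command is polled: up to
 * HCLGE_SERDES_RETRY_NUM (100) reads spaced HCLGE_SERDES_RETRY_MS (10) ms
 * apart, i.e. roughly one second, before the -EBUSY timeout is reported.
 */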
4433
4434 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
4435                             int stream_id, bool enable)
4436 {
4437         struct hclge_desc desc;
4438         struct hclge_cfg_com_tqp_queue_cmd *req =
4439                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
4440         int ret;
4441
4442         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
4443         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
4444         req->stream_id = cpu_to_le16(stream_id);
4445         req->enable |= enable << HCLGE_TQP_ENABLE_B;
4446
4447         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4448         if (ret)
4449                 dev_err(&hdev->pdev->dev,
4450                         "Tqp enable fail, status =%d.\n", ret);
4451         return ret;
4452 }
4453
4454 static int hclge_set_loopback(struct hnae3_handle *handle,
4455                               enum hnae3_loop loop_mode, bool en)
4456 {
4457         struct hclge_vport *vport = hclge_get_vport(handle);
4458         struct hclge_dev *hdev = vport->back;
4459         int i, ret;
4460
4461         switch (loop_mode) {
4462         case HNAE3_LOOP_APP:
4463                 ret = hclge_set_app_loopback(hdev, en);
4464                 break;
4465         case HNAE3_LOOP_SERIAL_SERDES:
4466         case HNAE3_LOOP_PARALLEL_SERDES:
4467                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
4468                 break;
4469         default:
4470                 ret = -ENOTSUPP;
4471                 dev_err(&hdev->pdev->dev,
4472                         "loop_mode %d is not supported\n", loop_mode);
4473                 break;
4474         }
4475
4476         for (i = 0; i < vport->alloc_tqps; i++) {
4477                 ret = hclge_tqp_enable(hdev, i, 0, en);
4478                 if (ret)
4479                         return ret;
4480         }
4481
4482         return 0;
4483 }
4484
4485 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
4486 {
4487         struct hclge_vport *vport = hclge_get_vport(handle);
4488         struct hnae3_queue *queue;
4489         struct hclge_tqp *tqp;
4490         int i;
4491
4492         for (i = 0; i < vport->alloc_tqps; i++) {
4493                 queue = handle->kinfo.tqp[i];
4494                 tqp = container_of(queue, struct hclge_tqp, q);
4495                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
4496         }
4497 }
4498
4499 static int hclge_ae_start(struct hnae3_handle *handle)
4500 {
4501         struct hclge_vport *vport = hclge_get_vport(handle);
4502         struct hclge_dev *hdev = vport->back;
4503         int i;
4504
4505         for (i = 0; i < vport->alloc_tqps; i++)
4506                 hclge_tqp_enable(hdev, i, 0, true);
4507
4508         /* mac enable */
4509         hclge_cfg_mac_mode(hdev, true);
4510         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
4511         mod_timer(&hdev->service_timer, jiffies + HZ);
4512         hdev->hw.mac.link = 0;
4513
4514         /* reset tqp stats */
4515         hclge_reset_tqp_stats(handle);
4516
4517         hclge_mac_start_phy(hdev);
4518
4519         return 0;
4520 }
4521
4522 static void hclge_ae_stop(struct hnae3_handle *handle)
4523 {
4524         struct hclge_vport *vport = hclge_get_vport(handle);
4525         struct hclge_dev *hdev = vport->back;
4526         int i;
4527
4528         set_bit(HCLGE_STATE_DOWN, &hdev->state);
4529
4530         del_timer_sync(&hdev->service_timer);
4531         cancel_work_sync(&hdev->service_task);
4532         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
4533
4534         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
4535                 hclge_mac_stop_phy(hdev);
4536                 return;
4537         }
4538
4539         for (i = 0; i < vport->alloc_tqps; i++)
4540                 hclge_tqp_enable(hdev, i, 0, false);
4541
4542         /* Mac disable */
4543         hclge_cfg_mac_mode(hdev, false);
4544
4545         hclge_mac_stop_phy(hdev);
4546
4547         /* reset tqp stats */
4548         hclge_reset_tqp_stats(handle);
4549         del_timer_sync(&hdev->service_timer);
4550         cancel_work_sync(&hdev->service_task);
4551         hclge_update_link_status(hdev);
4552 }
4553
4554 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
4555                                          u16 cmdq_resp, u8  resp_code,
4556                                          enum hclge_mac_vlan_tbl_opcode op)
4557 {
4558         struct hclge_dev *hdev = vport->back;
4559         int return_status = -EIO;
4560
4561         if (cmdq_resp) {
4562                 dev_err(&hdev->pdev->dev,
4563                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
4564                         cmdq_resp);
4565                 return -EIO;
4566         }
4567
4568         if (op == HCLGE_MAC_VLAN_ADD) {
4569                 if ((!resp_code) || (resp_code == 1)) {
4570                         return_status = 0;
4571                 } else if (resp_code == 2) {
4572                         return_status = -ENOSPC;
4573                         dev_err(&hdev->pdev->dev,
4574                                 "add mac addr failed for uc_overflow.\n");
4575                 } else if (resp_code == 3) {
4576                         return_status = -ENOSPC;
4577                         dev_err(&hdev->pdev->dev,
4578                                 "add mac addr failed for mc_overflow.\n");
4579                 } else {
4580                         dev_err(&hdev->pdev->dev,
4581                                 "add mac addr failed for undefined, code=%d.\n",
4582                                 resp_code);
4583                 }
4584         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
4585                 if (!resp_code) {
4586                         return_status = 0;
4587                 } else if (resp_code == 1) {
4588                         return_status = -ENOENT;
4589                         dev_dbg(&hdev->pdev->dev,
4590                                 "remove mac addr failed for miss.\n");
4591                 } else {
4592                         dev_err(&hdev->pdev->dev,
4593                                 "remove mac addr failed for undefined, code=%d.\n",
4594                                 resp_code);
4595                 }
4596         } else if (op == HCLGE_MAC_VLAN_LKUP) {
4597                 if (!resp_code) {
4598                         return_status = 0;
4599                 } else if (resp_code == 1) {
4600                         return_status = -ENOENT;
4601                         dev_dbg(&hdev->pdev->dev,
4602                                 "lookup mac addr failed for miss.\n");
4603                 } else {
4604                         dev_err(&hdev->pdev->dev,
4605                                 "lookup mac addr failed for undefined, code=%d.\n",
4606                                 resp_code);
4607                 }
4608         } else {
4609                 return_status = -EINVAL;
4610                 dev_err(&hdev->pdev->dev,
4611                         "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n",
4612                         op);
4613         }
4614
4615         return return_status;
4616 }
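/* Firmware response codes handled above, for reference:
 *   ADD:    0 or 1 = success, 2 = unicast table overflow (-ENOSPC),
 *           3 = multicast table overflow (-ENOSPC)
 *   REMOVE: 0 = success, 1 = entry not found (-ENOENT)
 *   LOOKUP: 0 = success, 1 = entry not found (-ENOENT)
 */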
4617
4618 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
4619 {
4620         int word_num;
4621         int bit_num;
4622
4623         if (vfid > 255 || vfid < 0)
4624                 return -EIO;
4625
4626         if (vfid >= 0 && vfid <= 191) {
4627                 word_num = vfid / 32;
4628                 bit_num  = vfid % 32;
4629                 if (clr)
4630                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
4631                 else
4632                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
4633         } else {
4634                 word_num = (vfid - 192) / 32;
4635                 bit_num  = vfid % 32;
4636                 if (clr)
4637                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
4638                 else
4639                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
4640         }
4641
4642         return 0;
4643 }
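/* Worked example (hypothetical vfid): vfid 0..191 land in desc[1]
 * (six 32-bit words), vfid 192..255 in desc[2]. For vfid = 200:
 * word_num = (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8, so bit 8 of
 * desc[2].data[0] is set or cleared.
 */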
4644
4645 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
4646 {
4647 #define HCLGE_DESC_NUMBER 3
4648 #define HCLGE_FUNC_NUMBER_PER_DESC 6
4649         int i, j;
4650
4651         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
4652                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
4653                         if (desc[i].data[j])
4654                                 return false;
4655
4656         return true;
4657 }
4658
4659 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
4660                                    const u8 *addr)
4661 {
4662         const unsigned char *mac_addr = addr;
4663         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
4664                        (mac_addr[0]) | (mac_addr[1] << 8);
4665         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
4666
4667         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
4668         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
4669 }
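/* Example (hypothetical address): for 00:11:22:33:44:55 the packed values
 * are high_val = 0x33221100 (bytes 0..3) and low_val = 0x5544 (bytes 4..5),
 * stored little-endian in mac_addr_hi32 and mac_addr_lo16.
 */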
4670
4671 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
4672                                            const u8 *addr)
4673 {
4674         u16 high_val = addr[1] | (addr[0] << 8);
4675         struct hclge_dev *hdev = vport->back;
4676         u32 rsh = 4 - hdev->mta_mac_sel_type;
4677         u16 ret_val = (high_val >> rsh) & 0xfff;
4678
4679         return ret_val;
4680 }
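/* high_val holds MAC bits 47..32 (addr[0] is the most significant byte), so
 * with selection type 0 (rsh = 4) the 12-bit index covers MAC bits 47..36.
 * Example (hypothetical address): 01:00:5e:xx:xx:xx gives high_val = 0x0100
 * and a table index of 0x010.
 */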
4681
4682 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
4683                                      enum hclge_mta_dmac_sel_type mta_mac_sel,
4684                                      bool enable)
4685 {
4686         struct hclge_mta_filter_mode_cmd *req;
4687         struct hclge_desc desc;
4688         int ret;
4689
4690         req = (struct hclge_mta_filter_mode_cmd *)desc.data;
4691         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
4692
4693         hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
4694                       enable);
4695         hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
4696                         HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
4697
4698         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4699         if (ret)
4700                 dev_err(&hdev->pdev->dev,
4701                         "Config mta filter mode failed for cmd_send, ret =%d.\n",
4702                         ret);
4703
4704         return ret;
4705 }
4706
4707 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
4708                               u8 func_id,
4709                               bool enable)
4710 {
4711         struct hclge_cfg_func_mta_filter_cmd *req;
4712         struct hclge_desc desc;
4713         int ret;
4714
4715         req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
4716         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
4717
4718         hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
4719                       enable);
4720         req->function_id = func_id;
4721
4722         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4723         if (ret)
4724                 dev_err(&hdev->pdev->dev,
4725                         "Config func_id enable failed for cmd_send, ret =%d.\n",
4726                         ret);
4727
4728         return ret;
4729 }
4730
4731 static int hclge_set_mta_table_item(struct hclge_vport *vport,
4732                                     u16 idx,
4733                                     bool enable)
4734 {
4735         struct hclge_dev *hdev = vport->back;
4736         struct hclge_cfg_func_mta_item_cmd *req;
4737         struct hclge_desc desc;
4738         u16 item_idx = 0;
4739         int ret;
4740
4741         req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
4742         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
4743         hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
4744
4745         hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
4746                         HCLGE_CFG_MTA_ITEM_IDX_S, idx);
4747         req->item_idx = cpu_to_le16(item_idx);
4748
4749         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4750         if (ret) {
4751                 dev_err(&hdev->pdev->dev,
4752                         "Config mta table item failed for cmd_send, ret =%d.\n",
4753                         ret);
4754                 return ret;
4755         }
4756
4757         if (enable)
4758                 set_bit(idx, vport->mta_shadow);
4759         else
4760                 clear_bit(idx, vport->mta_shadow);
4761
4762         return 0;
4763 }
4764
4765 static int hclge_update_mta_status(struct hnae3_handle *handle)
4766 {
4767         unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
4768         struct hclge_vport *vport = hclge_get_vport(handle);
4769         struct net_device *netdev = handle->kinfo.netdev;
4770         struct netdev_hw_addr *ha;
4771         u16 tbl_idx;
4772
4773         memset(mta_status, 0, sizeof(mta_status));
4774
4775         /* update mta_status from mc addr list */
4776         netdev_for_each_mc_addr(ha, netdev) {
4777                 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
4778                 set_bit(tbl_idx, mta_status);
4779         }
4780
4781         return hclge_update_mta_status_common(vport, mta_status,
4782                                         0, HCLGE_MTA_TBL_SIZE, true);
4783 }
4784
4785 int hclge_update_mta_status_common(struct hclge_vport *vport,
4786                                    unsigned long *status,
4787                                    u16 idx,
4788                                    u16 count,
4789                                    bool update_filter)
4790 {
4791         struct hclge_dev *hdev = vport->back;
4792         u16 update_max = idx + count;
4793         u16 check_max;
4794         int ret = 0;
4795         bool used;
4796         u16 i;
4797
4798         /* setup mta check range */
4799         if (update_filter) {
4800                 i = 0;
4801                 check_max = HCLGE_MTA_TBL_SIZE;
4802         } else {
4803                 i = idx;
4804                 check_max = update_max;
4805         }
4806
4807         used = false;
4808         /* check and update all mta item */
4809         for (; i < check_max; i++) {
4810                 /* ignore unused item */
4811                 if (!test_bit(i, vport->mta_shadow))
4812                         continue;
4813
4814                 /* if i in update range then update it */
4815                 if (i >= idx && i < update_max)
4816                         if (!test_bit(i - idx, status))
4817                                 hclge_set_mta_table_item(vport, i, false);
4818
4819                 if (!used && test_bit(i, vport->mta_shadow))
4820                         used = true;
4821         }
4822
4823         /* no longer use mta, disable it */
4824         if (vport->accept_mta_mc && update_filter && !used) {
4825                 ret = hclge_cfg_func_mta_filter(hdev,
4826                                                 vport->vport_id,
4827                                                 false);
4828                 if (ret)
4829                         dev_err(&hdev->pdev->dev,
4830                                 "disable func mta filter fail ret=%d\n",
4831                                 ret);
4832                 else
4833                         vport->accept_mta_mc = false;
4834         }
4835
4836         return ret;
4837 }
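/* In short: walk the vport's mta_shadow bitmap; any item inside
 * [idx, idx + count) whose bit is clear in @status is disabled in hardware,
 * and when a full-table pass (update_filter) leaves no item in use the
 * per-function MTA filter itself is switched off.
 */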
4838
4839 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
4840                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
4841 {
4842         struct hclge_dev *hdev = vport->back;
4843         struct hclge_desc desc;
4844         u8 resp_code;
4845         u16 retval;
4846         int ret;
4847
4848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
4849
4850         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4851
4852         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4853         if (ret) {
4854                 dev_err(&hdev->pdev->dev,
4855                         "del mac addr failed for cmd_send, ret =%d.\n",
4856                         ret);
4857                 return ret;
4858         }
4859         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4860         retval = le16_to_cpu(desc.retval);
4861
4862         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
4863                                              HCLGE_MAC_VLAN_REMOVE);
4864 }
4865
4866 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
4867                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
4868                                      struct hclge_desc *desc,
4869                                      bool is_mc)
4870 {
4871         struct hclge_dev *hdev = vport->back;
4872         u8 resp_code;
4873         u16 retval;
4874         int ret;
4875
4876         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
4877         if (is_mc) {
4878                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4879                 memcpy(desc[0].data,
4880                        req,
4881                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4882                 hclge_cmd_setup_basic_desc(&desc[1],
4883                                            HCLGE_OPC_MAC_VLAN_ADD,
4884                                            true);
4885                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4886                 hclge_cmd_setup_basic_desc(&desc[2],
4887                                            HCLGE_OPC_MAC_VLAN_ADD,
4888                                            true);
4889                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4890         } else {
4891                 memcpy(desc[0].data,
4892                        req,
4893                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4894                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
4895         }
4896         if (ret) {
4897                 dev_err(&hdev->pdev->dev,
4898                         "lookup mac addr failed for cmd_send, ret =%d.\n",
4899                         ret);
4900                 return ret;
4901         }
4902         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
4903         retval = le16_to_cpu(desc[0].retval);
4904
4905         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
4906                                              HCLGE_MAC_VLAN_LKUP);
4907 }
4908
4909 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
4910                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
4911                                   struct hclge_desc *mc_desc)
4912 {
4913         struct hclge_dev *hdev = vport->back;
4914         int cfg_status;
4915         u8 resp_code;
4916         u16 retval;
4917         int ret;
4918
4919         if (!mc_desc) {
4920                 struct hclge_desc desc;
4921
4922                 hclge_cmd_setup_basic_desc(&desc,
4923                                            HCLGE_OPC_MAC_VLAN_ADD,
4924                                            false);
4925                 memcpy(desc.data, req,
4926                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4927                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4928                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4929                 retval = le16_to_cpu(desc.retval);
4930
4931                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
4932                                                            resp_code,
4933                                                            HCLGE_MAC_VLAN_ADD);
4934         } else {
4935                 hclge_cmd_reuse_desc(&mc_desc[0], false);
4936                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4937                 hclge_cmd_reuse_desc(&mc_desc[1], false);
4938                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4939                 hclge_cmd_reuse_desc(&mc_desc[2], false);
4940                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
4941                 memcpy(mc_desc[0].data, req,
4942                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
4943                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
4944                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
4945                 retval = le16_to_cpu(mc_desc[0].retval);
4946
4947                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
4948                                                            resp_code,
4949                                                            HCLGE_MAC_VLAN_ADD);
4950         }
4951
4952         if (ret) {
4953                 dev_err(&hdev->pdev->dev,
4954                         "add mac addr failed for cmd_send, ret =%d.\n",
4955                         ret);
4956                 return ret;
4957         }
4958
4959         return cfg_status;
4960 }
4961
4962 static int hclge_add_uc_addr(struct hnae3_handle *handle,
4963                              const unsigned char *addr)
4964 {
4965         struct hclge_vport *vport = hclge_get_vport(handle);
4966
4967         return hclge_add_uc_addr_common(vport, addr);
4968 }
4969
4970 int hclge_add_uc_addr_common(struct hclge_vport *vport,
4971                              const unsigned char *addr)
4972 {
4973         struct hclge_dev *hdev = vport->back;
4974         struct hclge_mac_vlan_tbl_entry_cmd req;
4975         struct hclge_desc desc;
4976         u16 egress_port = 0;
4977         int ret;
4978
4979         /* mac addr check */
4980         if (is_zero_ether_addr(addr) ||
4981             is_broadcast_ether_addr(addr) ||
4982             is_multicast_ether_addr(addr)) {
4983                 dev_err(&hdev->pdev->dev,
4984                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
4985                          addr,
4986                          is_zero_ether_addr(addr),
4987                          is_broadcast_ether_addr(addr),
4988                          is_multicast_ether_addr(addr));
4989                 return -EINVAL;
4990         }
4991
4992         memset(&req, 0, sizeof(req));
4993         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4994
4995         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
4996                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
4997
4998         req.egress_port = cpu_to_le16(egress_port);
4999
5000         hclge_prepare_mac_addr(&req, addr);
5001
5002         /* Lookup the mac address in the mac_vlan table, and add
5003          * it if the entry does not exist. Duplicate unicast entries
5004          * are not allowed in the mac_vlan table.
5005          */
5006         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5007         if (ret == -ENOENT)
5008                 return hclge_add_mac_vlan_tbl(vport, &req, NULL);
5009
5010         /* check if we just hit the duplicate */
5011         if (!ret)
5012                 ret = -EINVAL;
5013
5014         dev_err(&hdev->pdev->dev,
5015                 "PF failed to add unicast entry(%pM) in the MAC table\n",
5016                 addr);
5017
5018         return ret;
5019 }
5020
5021 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5022                             const unsigned char *addr)
5023 {
5024         struct hclge_vport *vport = hclge_get_vport(handle);
5025
5026         return hclge_rm_uc_addr_common(vport, addr);
5027 }
5028
5029 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5030                             const unsigned char *addr)
5031 {
5032         struct hclge_dev *hdev = vport->back;
5033         struct hclge_mac_vlan_tbl_entry_cmd req;
5034         int ret;
5035
5036         /* mac addr check */
5037         if (is_zero_ether_addr(addr) ||
5038             is_broadcast_ether_addr(addr) ||
5039             is_multicast_ether_addr(addr)) {
5040                 dev_dbg(&hdev->pdev->dev,
5041                         "Remove mac err! invalid mac:%pM.\n",
5042                          addr);
5043                 return -EINVAL;
5044         }
5045
5046         memset(&req, 0, sizeof(req));
5047         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5048         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5049         hclge_prepare_mac_addr(&req, addr);
5050         ret = hclge_remove_mac_vlan_tbl(vport, &req);
5051
5052         return ret;
5053 }
5054
5055 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5056                              const unsigned char *addr)
5057 {
5058         struct hclge_vport *vport = hclge_get_vport(handle);
5059
5060         return hclge_add_mc_addr_common(vport, addr);
5061 }
5062
5063 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5064                              const unsigned char *addr)
5065 {
5066         struct hclge_dev *hdev = vport->back;
5067         struct hclge_mac_vlan_tbl_entry_cmd req;
5068         struct hclge_desc desc[3];
5069         u16 tbl_idx;
5070         int status;
5071
5072         /* mac addr check */
5073         if (!is_multicast_ether_addr(addr)) {
5074                 dev_err(&hdev->pdev->dev,
5075                         "Add mc mac err! invalid mac:%pM.\n",
5076                          addr);
5077                 return -EINVAL;
5078         }
5079         memset(&req, 0, sizeof(req));
5080         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5081         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5082         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5083         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5084         hclge_prepare_mac_addr(&req, addr);
5085         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5086         if (!status) {
5087                 /* This mac addr exists, update VFID for it */
5088                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5089                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5090         } else {
5091                 /* This mac addr does not exist, add a new entry for it */
5092                 memset(desc[0].data, 0, sizeof(desc[0].data));
5093                 memset(desc[1].data, 0, sizeof(desc[0].data));
5094                 memset(desc[2].data, 0, sizeof(desc[0].data));
5095                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5096                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5097         }
5098
5099         /* If mc mac vlan table is full, use MTA table */
5100         if (status == -ENOSPC) {
5101                 if (!vport->accept_mta_mc) {
5102                         status = hclge_cfg_func_mta_filter(hdev,
5103                                                            vport->vport_id,
5104                                                            true);
5105                         if (status) {
5106                                 dev_err(&hdev->pdev->dev,
5107                                         "set mta filter mode fail ret=%d\n",
5108                                         status);
5109                                 return status;
5110                         }
5111                         vport->accept_mta_mc = true;
5112                 }
5113
5114                 /* Set MTA table for this MAC address */
5115                 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
5116                 status = hclge_set_mta_table_item(vport, tbl_idx, true);
5117         }
5118
5119         return status;
5120 }
5121
5122 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5123                             const unsigned char *addr)
5124 {
5125         struct hclge_vport *vport = hclge_get_vport(handle);
5126
5127         return hclge_rm_mc_addr_common(vport, addr);
5128 }
5129
5130 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5131                             const unsigned char *addr)
5132 {
5133         struct hclge_dev *hdev = vport->back;
5134         struct hclge_mac_vlan_tbl_entry_cmd req;
5135         enum hclge_cmd_status status;
5136         struct hclge_desc desc[3];
5137
5138         /* mac addr check */
5139         if (!is_multicast_ether_addr(addr)) {
5140                 dev_dbg(&hdev->pdev->dev,
5141                         "Remove mc mac err! invalid mac:%pM.\n",
5142                          addr);
5143                 return -EINVAL;
5144         }
5145
5146         memset(&req, 0, sizeof(req));
5147         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5148         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5149         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5150         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5151         hclge_prepare_mac_addr(&req, addr);
5152         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5153         if (!status) {
5154                 /* This mac addr exists, remove this handle's VFID for it */
5155                 hclge_update_desc_vfid(desc, vport->vport_id, true);
5156
5157                 if (hclge_is_all_function_id_zero(desc))
5158                         /* All the vfids are zero, so delete this entry */
5159                         status = hclge_remove_mac_vlan_tbl(vport, &req);
5160                 else
5161                         /* Not all the vfids are zero, just update the vfid bitmap */
5162                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5163
5164         } else {
5165                 /* This mac address may be in the mta table, but it cannot
5166                  * be deleted here because an mta entry represents an address
5167                  * range rather than a specific address. The deletion of stale
5168                  * entries takes effect in update_mta_status, which is called
5169                  * by hns3_nic_set_rx_mode.
5170                  */
5171                 status = 0;
5172         }
5173
5174         return status;
5175 }
5176
5177 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5178                                               u16 cmdq_resp, u8 resp_code)
5179 {
5180 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
5181 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
5182 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
5183 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
5184
5185         int return_status;
5186
5187         if (cmdq_resp) {
5188                 dev_err(&hdev->pdev->dev,
5189                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5190                         cmdq_resp);
5191                 return -EIO;
5192         }
5193
5194         switch (resp_code) {
5195         case HCLGE_ETHERTYPE_SUCCESS_ADD:
5196         case HCLGE_ETHERTYPE_ALREADY_ADD:
5197                 return_status = 0;
5198                 break;
5199         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5200                 dev_err(&hdev->pdev->dev,
5201                         "add mac ethertype failed for manager table overflow.\n");
5202                 return_status = -EIO;
5203                 break;
5204         case HCLGE_ETHERTYPE_KEY_CONFLICT:
5205                 dev_err(&hdev->pdev->dev,
5206                         "add mac ethertype failed for key conflict.\n");
5207                 return_status = -EIO;
5208                 break;
5209         default:
5210                 dev_err(&hdev->pdev->dev,
5211                         "add mac ethertype failed for undefined, code=%d.\n",
5212                         resp_code);
5213                 return_status = -EIO;
5214         }
5215
5216         return return_status;
5217 }
5218
5219 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5220                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
5221 {
5222         struct hclge_desc desc;
5223         u8 resp_code;
5224         u16 retval;
5225         int ret;
5226
5227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5228         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5229
5230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5231         if (ret) {
5232                 dev_err(&hdev->pdev->dev,
5233                         "add mac ethertype failed for cmd_send, ret =%d.\n",
5234                         ret);
5235                 return ret;
5236         }
5237
5238         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5239         retval = le16_to_cpu(desc.retval);
5240
5241         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5242 }
5243
5244 static int init_mgr_tbl(struct hclge_dev *hdev)
5245 {
5246         int ret;
5247         int i;
5248
5249         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5250                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5251                 if (ret) {
5252                         dev_err(&hdev->pdev->dev,
5253                                 "add mac ethertype failed, ret =%d.\n",
5254                                 ret);
5255                         return ret;
5256                 }
5257         }
5258
5259         return 0;
5260 }
5261
5262 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5263 {
5264         struct hclge_vport *vport = hclge_get_vport(handle);
5265         struct hclge_dev *hdev = vport->back;
5266
5267         ether_addr_copy(p, hdev->hw.mac.mac_addr);
5268 }
5269
5270 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5271                               bool is_first)
5272 {
5273         const unsigned char *new_addr = (const unsigned char *)p;
5274         struct hclge_vport *vport = hclge_get_vport(handle);
5275         struct hclge_dev *hdev = vport->back;
5276         int ret;
5277
5278         /* mac addr check */
5279         if (is_zero_ether_addr(new_addr) ||
5280             is_broadcast_ether_addr(new_addr) ||
5281             is_multicast_ether_addr(new_addr)) {
5282                 dev_err(&hdev->pdev->dev,
5283                         "Change uc mac err! invalid mac:%pM.\n",
5284                          new_addr);
5285                 return -EINVAL;
5286         }
5287
5288         if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
5289                 dev_warn(&hdev->pdev->dev,
5290                          "remove old uc mac address fail.\n");
5291
5292         ret = hclge_add_uc_addr(handle, new_addr);
5293         if (ret) {
5294                 dev_err(&hdev->pdev->dev,
5295                         "add uc mac address fail, ret =%d.\n",
5296                         ret);
5297
5298                 if (!is_first &&
5299                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
5300                         dev_err(&hdev->pdev->dev,
5301                                 "restore uc mac address fail.\n");
5302
5303                 return -EIO;
5304         }
5305
5306         ret = hclge_pause_addr_cfg(hdev, new_addr);
5307         if (ret) {
5308                 dev_err(&hdev->pdev->dev,
5309                         "configure mac pause address fail, ret =%d.\n",
5310                         ret);
5311                 return -EIO;
5312         }
5313
5314         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
5315
5316         return 0;
5317 }
5318
5319 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
5320                           int cmd)
5321 {
5322         struct hclge_vport *vport = hclge_get_vport(handle);
5323         struct hclge_dev *hdev = vport->back;
5324
5325         if (!hdev->hw.mac.phydev)
5326                 return -EOPNOTSUPP;
5327
5328         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
5329 }
5330
5331 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
5332                                       bool filter_en)
5333 {
5334         struct hclge_vlan_filter_ctrl_cmd *req;
5335         struct hclge_desc desc;
5336         int ret;
5337
5338         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
5339
5340         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
5341         req->vlan_type = vlan_type;
5342         req->vlan_fe = filter_en;
5343
5344         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5345         if (ret)
5346                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
5347                         ret);
5348
5349         return ret;
5350 }
5351
5352 #define HCLGE_FILTER_TYPE_VF            0
5353 #define HCLGE_FILTER_TYPE_PORT          1
5354
5355 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
5356 {
5357         struct hclge_vport *vport = hclge_get_vport(handle);
5358         struct hclge_dev *hdev = vport->back;
5359
5360         hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
5361 }
5362
5363 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
5364                                     bool is_kill, u16 vlan, u8 qos,
5365                                     __be16 proto)
5366 {
5367 #define HCLGE_MAX_VF_BYTES  16
5368         struct hclge_vlan_filter_vf_cfg_cmd *req0;
5369         struct hclge_vlan_filter_vf_cfg_cmd *req1;
5370         struct hclge_desc desc[2];
5371         u8 vf_byte_val;
5372         u8 vf_byte_off;
5373         int ret;
5374
5375         hclge_cmd_setup_basic_desc(&desc[0],
5376                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
5377         hclge_cmd_setup_basic_desc(&desc[1],
5378                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
5379
5380         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5381
5382         vf_byte_off = vfid / 8;
5383         vf_byte_val = 1 << (vfid % 8);
5384
5385         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
5386         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
5387
5388         req0->vlan_id  = cpu_to_le16(vlan);
5389         req0->vlan_cfg = is_kill;
5390
5391         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
5392                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
5393         else
5394                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
5395
5396         ret = hclge_cmd_send(&hdev->hw, desc, 2);
5397         if (ret) {
5398                 dev_err(&hdev->pdev->dev,
5399                         "Send vf vlan command fail, ret =%d.\n",
5400                         ret);
5401                 return ret;
5402         }
5403
5404         if (!is_kill) {
5405 #define HCLGE_VF_VLAN_NO_ENTRY  2
5406                 if (!req0->resp_code || req0->resp_code == 1)
5407                         return 0;
5408
5409                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
5410                         dev_warn(&hdev->pdev->dev,
5411                                  "vf vlan table is full, vf vlan filter is disabled\n");
5412                         return 0;
5413                 }
5414
5415                 dev_err(&hdev->pdev->dev,
5416                         "Add vf vlan filter fail, ret =%d.\n",
5417                         req0->resp_code);
5418         } else {
5419 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
5420                 if (!req0->resp_code)
5421                         return 0;
5422
5423                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
5424                         dev_warn(&hdev->pdev->dev,
5425                                  "vlan %d filter is not in vf vlan table\n",
5426                                  vlan);
5427                         return 0;
5428                 }
5429
5430                 dev_err(&hdev->pdev->dev,
5431                         "Kill vf vlan filter fail, ret =%d.\n",
5432                         req0->resp_code);
5433         }
5434
5435         return -EIO;
5436 }
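/* Each descriptor carries a 16-byte (128-bit) VF bitmap, so VFs 0..127 are
 * addressed through req0 and VFs 128..255 through req1. Example (hypothetical
 * vfid): vfid = 10 gives vf_byte_off = 1 and vf_byte_val = 1 << 2 = 0x04.
 */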
5437
5438 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
5439                                       u16 vlan_id, bool is_kill)
5440 {
5441         struct hclge_vlan_filter_pf_cfg_cmd *req;
5442         struct hclge_desc desc;
5443         u8 vlan_offset_byte_val;
5444         u8 vlan_offset_byte;
5445         u8 vlan_offset_160;
5446         int ret;
5447
5448         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
5449
5450         vlan_offset_160 = vlan_id / 160;
5451         vlan_offset_byte = (vlan_id % 160) / 8;
5452         vlan_offset_byte_val = 1 << (vlan_id % 8);
5453
5454         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
5455         req->vlan_offset = vlan_offset_160;
5456         req->vlan_cfg = is_kill;
5457         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
5458
5459         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5460         if (ret)
5461                 dev_err(&hdev->pdev->dev,
5462                         "port vlan command, send fail, ret =%d.\n", ret);
5463         return ret;
5464 }
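/* The 4096 VLAN ids are split into blocks of 160, one command per block.
 * Example (hypothetical vlan): vlan_id = 1000 gives vlan_offset_160 = 6,
 * vlan_offset_byte = 5 and vlan_offset_byte_val = 0x01.
 */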
5465
5466 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
5467                                     u16 vport_id, u16 vlan_id, u8 qos,
5468                                     bool is_kill)
5469 {
5470         u16 vport_idx, vport_num = 0;
5471         int ret;
5472
5473         if (is_kill && !vlan_id)
5474                 return 0;
5475
5476         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
5477                                        0, proto);
5478         if (ret) {
5479                 dev_err(&hdev->pdev->dev,
5480                         "Set %d vport vlan filter config fail, ret =%d.\n",
5481                         vport_id, ret);
5482                 return ret;
5483         }
5484
5485         /* vlan 0 may be added twice when 8021q module is enabled */
5486         if (!is_kill && !vlan_id &&
5487             test_bit(vport_id, hdev->vlan_table[vlan_id]))
5488                 return 0;
5489
5490         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
5491                 dev_err(&hdev->pdev->dev,
5492                         "Add port vlan failed, vport %d is already in vlan %d\n",
5493                         vport_id, vlan_id);
5494                 return -EINVAL;
5495         }
5496
5497         if (is_kill &&
5498             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
5499                 dev_err(&hdev->pdev->dev,
5500                         "Delete port vlan failed, vport %d is not in vlan %d\n",
5501                         vport_id, vlan_id);
5502                 return -EINVAL;
5503         }
5504
5505         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
5506                 vport_num++;
5507
5508         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
5509                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
5510                                                  is_kill);
5511
5512         return ret;
5513 }
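/* hdev->vlan_table[] is used as a per-VLAN membership bitmap across vports:
 * the port-level filter is only reprogrammed when the first vport joins a
 * VLAN (vport_num == 1 on add) or the last one leaves it (vport_num == 0 on
 * kill), so intermediate joins and leaves touch only the VF table.
 */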
5514
5515 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
5516                           u16 vlan_id, bool is_kill)
5517 {
5518         struct hclge_vport *vport = hclge_get_vport(handle);
5519         struct hclge_dev *hdev = vport->back;
5520
5521         return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
5522                                         0, is_kill);
5523 }
5524
5525 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
5526                                     u16 vlan, u8 qos, __be16 proto)
5527 {
5528         struct hclge_vport *vport = hclge_get_vport(handle);
5529         struct hclge_dev *hdev = vport->back;
5530
5531         if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
5532                 return -EINVAL;
5533         if (proto != htons(ETH_P_8021Q))
5534                 return -EPROTONOSUPPORT;
5535
5536         return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
5537 }
5538
5539 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
5540 {
5541         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
5542         struct hclge_vport_vtag_tx_cfg_cmd *req;
5543         struct hclge_dev *hdev = vport->back;
5544         struct hclge_desc desc;
5545         int status;
5546
5547         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
5548
5549         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
5550         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
5551         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
5552         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
5553                       vcfg->accept_tag1 ? 1 : 0);
5554         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
5555                       vcfg->accept_untag1 ? 1 : 0);
5556         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
5557                       vcfg->accept_tag2 ? 1 : 0);
5558         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
5559                       vcfg->accept_untag2 ? 1 : 0);
5560         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
5561                       vcfg->insert_tag1_en ? 1 : 0);
5562         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
5563                       vcfg->insert_tag2_en ? 1 : 0);
5564         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
5565
5566         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
5567         req->vf_bitmap[req->vf_offset] =
5568                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5569
5570         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5571         if (status)
5572                 dev_err(&hdev->pdev->dev,
5573                         "Send port txvlan cfg command fail, ret =%d\n",
5574                         status);
5575
5576         return status;
5577 }
5578
5579 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
5580 {
5581         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
5582         struct hclge_vport_vtag_rx_cfg_cmd *req;
5583         struct hclge_dev *hdev = vport->back;
5584         struct hclge_desc desc;
5585         int status;
5586
5587         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
5588
5589         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
5590         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
5591                       vcfg->strip_tag1_en ? 1 : 0);
5592         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
5593                       vcfg->strip_tag2_en ? 1 : 0);
5594         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
5595                       vcfg->vlan1_vlan_prionly ? 1 : 0);
5596         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
5597                       vcfg->vlan2_vlan_prionly ? 1 : 0);
5598
5599         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
5600         req->vf_bitmap[req->vf_offset] =
5601                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
5602
5603         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5604         if (status)
5605                 dev_err(&hdev->pdev->dev,
5606                         "Send port rxvlan cfg command fail, ret =%d\n",
5607                         status);
5608
5609         return status;
5610 }
5611
5612 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
5613 {
5614         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
5615         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
5616         struct hclge_desc desc;
5617         int status;
5618
5619         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
5620         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
5621         rx_req->ot_fst_vlan_type =
5622                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
5623         rx_req->ot_sec_vlan_type =
5624                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
5625         rx_req->in_fst_vlan_type =
5626                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
5627         rx_req->in_sec_vlan_type =
5628                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
5629
5630         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5631         if (status) {
5632                 dev_err(&hdev->pdev->dev,
5633                         "Send rxvlan protocol type command fail, ret =%d\n",
5634                         status);
5635                 return status;
5636         }
5637
5638         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
5639
5640         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
5641         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
5642         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
5643
5644         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5645         if (status)
5646                 dev_err(&hdev->pdev->dev,
5647                         "Send txvlan protocol type command fail, ret =%d\n",
5648                         status);
5649
5650         return status;
5651 }
5652
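/* Note: default VLAN bring-up. Enable the VF and port VLAN filters, set
 * every RX/TX VLAN type to the standard 0x8100 TPID, program each vport's
 * default TX/RX tag offload configuration, and finally add VLAN id 0 to
 * the filter table for the PF's own handle.
 */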
5653 static int hclge_init_vlan_config(struct hclge_dev *hdev)
5654 {
5655 #define HCLGE_DEF_VLAN_TYPE             0x8100
5656
5657         struct hnae3_handle *handle;
5658         struct hclge_vport *vport;
5659         int ret;
5660         int i;
5661
5662         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
5663         if (ret)
5664                 return ret;
5665
5666         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
5667         if (ret)
5668                 return ret;
5669
5670         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
5671         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
5672         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
5673         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
5674         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
5675         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
5676
5677         ret = hclge_set_vlan_protocol_type(hdev);
5678         if (ret)
5679                 return ret;
5680
5681         for (i = 0; i < hdev->num_alloc_vport; i++) {
5682                 vport = &hdev->vport[i];
5683                 vport->txvlan_cfg.accept_tag1 = true;
5684                 vport->txvlan_cfg.accept_untag1 = true;
5685
5686                 /* accept_tag2 and accept_untag2 are not supported on
5687                  * pdev revision(0x20); newer revisions support them. Setting
5688                  * these two fields does not return an error when the driver
5689                  * sends the command to firmware on revision(0x20), and the
5690                  * two fields cannot be configured by the user.
5691                  */
5692                 vport->txvlan_cfg.accept_tag2 = true;
5693                 vport->txvlan_cfg.accept_untag2 = true;
5694
5695                 vport->txvlan_cfg.insert_tag1_en = false;
5696                 vport->txvlan_cfg.insert_tag2_en = false;
5697                 vport->txvlan_cfg.default_tag1 = 0;
5698                 vport->txvlan_cfg.default_tag2 = 0;
5699
5700                 ret = hclge_set_vlan_tx_offload_cfg(vport);
5701                 if (ret)
5702                         return ret;
5703
5704                 vport->rxvlan_cfg.strip_tag1_en = false;
5705                 vport->rxvlan_cfg.strip_tag2_en = true;
5706                 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
5707                 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
5708
5709                 ret = hclge_set_vlan_rx_offload_cfg(vport);
5710                 if (ret)
5711                         return ret;
5712         }
5713
5714         handle = &hdev->vport[0].nic;
5715         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
5716 }
5717
5718 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
5719 {
5720         struct hclge_vport *vport = hclge_get_vport(handle);
5721
5722         vport->rxvlan_cfg.strip_tag1_en = false;
5723         vport->rxvlan_cfg.strip_tag2_en = enable;
5724         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
5725         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
5726
5727         return hclge_set_vlan_rx_offload_cfg(vport);
5728 }
5729
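/* Note: convert the requested MTU to a max frame size (MTU + Ethernet
 * header + FCS + one VLAN tag), reject values outside the supported
 * range, never program less than the default frame size, and cache the
 * new value in hdev->mps on success.
 */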
5730 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
5731 {
5732         struct hclge_config_max_frm_size_cmd *req;
5733         struct hclge_desc desc;
5734         int max_frm_size;
5735         int ret;
5736
5737         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5738
5739         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
5740             max_frm_size > HCLGE_MAC_MAX_FRAME)
5741                 return -EINVAL;
5742
5743         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
5744
5745         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
5746
5747         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
5748         req->max_frm_size = cpu_to_le16(max_frm_size);
5749         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
5750
5751         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5752         if (ret)
5753                 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
5754         else
5755                 hdev->mps = max_frm_size;
5756
5757         return ret;
5758 }
5759
5760 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
5761 {
5762         struct hclge_vport *vport = hclge_get_vport(handle);
5763         struct hclge_dev *hdev = vport->back;
5764         int ret;
5765
5766         ret = hclge_set_mac_mtu(hdev, new_mtu);
5767         if (ret) {
5768                 dev_err(&hdev->pdev->dev,
5769                         "Change mtu fail, ret =%d\n", ret);
5770                 return ret;
5771         }
5772
5773         ret = hclge_buffer_alloc(hdev);
5774         if (ret)
5775                 dev_err(&hdev->pdev->dev,
5776                         "Allocate buffer fail, ret =%d\n", ret);
5777
5778         return ret;
5779 }
5780
5781 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
5782                                     bool enable)
5783 {
5784         struct hclge_reset_tqp_queue_cmd *req;
5785         struct hclge_desc desc;
5786         int ret;
5787
5788         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
5789
5790         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
5791         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5792         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
5793
5794         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5795         if (ret) {
5796                 dev_err(&hdev->pdev->dev,
5797                         "Send tqp reset cmd error, status =%d\n", ret);
5798                 return ret;
5799         }
5800
5801         return 0;
5802 }
5803
5804 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
5805 {
5806         struct hclge_reset_tqp_queue_cmd *req;
5807         struct hclge_desc desc;
5808         int ret;
5809
5810         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
5811
5812         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
5813         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5814
5815         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5816         if (ret) {
5817                 dev_err(&hdev->pdev->dev,
5818                         "Get reset status error, status =%d\n", ret);
5819                 return ret;
5820         }
5821
5822         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
5823 }
5824
5825 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
5826                                           u16 queue_id)
5827 {
5828         struct hnae3_queue *queue;
5829         struct hclge_tqp *tqp;
5830
5831         queue = handle->kinfo.tqp[queue_id];
5832         tqp = container_of(queue, struct hclge_tqp, q);
5833
5834         return tqp->index;
5835 }
5836
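/* Note: reset a single TQP. Disable the queue, ask firmware to assert the
 * TQP reset, poll (20 ms per try, up to HCLGE_TQP_RESET_TRY_TIMES) for the
 * reset status bit, then deassert the reset. The whole sequence is skipped
 * while a device-level reset is being handled.
 */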
5837 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
5838 {
5839         struct hclge_vport *vport = hclge_get_vport(handle);
5840         struct hclge_dev *hdev = vport->back;
5841         int reset_try_times = 0;
5842         int reset_status;
5843         u16 queue_gid;
5844         int ret;
5845
5846         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5847                 return;
5848
5849         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
5850
5851         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
5852         if (ret) {
5853                 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
5854                 return;
5855         }
5856
5857         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
5858         if (ret) {
5859                 dev_warn(&hdev->pdev->dev,
5860                          "Send reset tqp cmd fail, ret = %d\n", ret);
5861                 return;
5862         }
5863
5864         reset_try_times = 0;
5865         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5866                 /* Wait for tqp hw reset */
5867                 msleep(20);
5868                 reset_status = hclge_get_reset_status(hdev, queue_gid);
5869                 if (reset_status)
5870                         break;
5871         }
5872
5873         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5874                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5875                 return;
5876         }
5877
5878         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5879         if (ret) {
5880                 dev_warn(&hdev->pdev->dev,
5881                          "Deassert the soft reset fail, ret = %d\n", ret);
5882                 return;
5883         }
5884 }
5885
5886 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
5887 {
5888         struct hclge_dev *hdev = vport->back;
5889         int reset_try_times = 0;
5890         int reset_status;
5891         u16 queue_gid;
5892         int ret;
5893
5894         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
5895
5896         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
5897         if (ret) {
5898                 dev_warn(&hdev->pdev->dev,
5899                          "Send reset tqp cmd fail, ret = %d\n", ret);
5900                 return;
5901         }
5902
5903         reset_try_times = 0;
5904         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5905                 /* Wait for tqp hw reset */
5906                 msleep(20);
5907                 reset_status = hclge_get_reset_status(hdev, queue_gid);
5908                 if (reset_status)
5909                         break;
5910         }
5911
5912         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5913                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5914                 return;
5915         }
5916
5917         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5918         if (ret)
5919                 dev_warn(&hdev->pdev->dev,
5920                          "Deassert the soft reset fail, ret = %d\n", ret);
5921 }
5922
5923 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
5924 {
5925         struct hclge_vport *vport = hclge_get_vport(handle);
5926         struct hclge_dev *hdev = vport->back;
5927
5928         return hdev->fw_version;
5929 }
5930
5931 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5932 {
5933         struct phy_device *phydev = hdev->hw.mac.phydev;
5934
5935         if (!phydev)
5936                 return;
5937
5938         phy_set_asym_pause(phydev, rx_en, tx_en);
5939 }
5940
5941 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5942 {
5943         int ret;
5944
5945         if (rx_en && tx_en)
5946                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
5947         else if (rx_en && !tx_en)
5948                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
5949         else if (!rx_en && tx_en)
5950                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
5951         else
5952                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
5953
5954         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
5955                 return 0;
5956
5957         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
5958         if (ret) {
5959                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
5960                         ret);
5961                 return ret;
5962         }
5963
5964         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
5965
5966         return 0;
5967 }
5968
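/* Note: re-resolve MAC pause settings from the PHY autoneg result.
 * Combine the local advertisement with the link partner's pause bits via
 * mii_resolve_flowctrl_fdx(), force pause off on half duplex, and apply
 * the outcome with hclge_cfg_pauseparam().
 */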
5969 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
5970 {
5971         struct phy_device *phydev = hdev->hw.mac.phydev;
5972         u16 remote_advertising = 0;
5973         u16 local_advertising = 0;
5974         u32 rx_pause, tx_pause;
5975         u8 flowctl;
5976
5977         if (!phydev->link || !phydev->autoneg)
5978                 return 0;
5979
5980         local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
5981
5982         if (phydev->pause)
5983                 remote_advertising = LPA_PAUSE_CAP;
5984
5985         if (phydev->asym_pause)
5986                 remote_advertising |= LPA_PAUSE_ASYM;
5987
5988         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
5989                                            remote_advertising);
5990         tx_pause = flowctl & FLOW_CTRL_TX;
5991         rx_pause = flowctl & FLOW_CTRL_RX;
5992
5993         if (phydev->duplex == HCLGE_MAC_HALF) {
5994                 tx_pause = 0;
5995                 rx_pause = 0;
5996         }
5997
5998         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
5999 }
6000
6001 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6002                                  u32 *rx_en, u32 *tx_en)
6003 {
6004         struct hclge_vport *vport = hclge_get_vport(handle);
6005         struct hclge_dev *hdev = vport->back;
6006
6007         *auto_neg = hclge_get_autoneg(handle);
6008
6009         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6010                 *rx_en = 0;
6011                 *tx_en = 0;
6012                 return;
6013         }
6014
6015         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6016                 *rx_en = 1;
6017                 *tx_en = 0;
6018         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6019                 *tx_en = 1;
6020                 *rx_en = 0;
6021         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6022                 *rx_en = 1;
6023                 *tx_en = 1;
6024         } else {
6025                 *rx_en = 0;
6026                 *tx_en = 0;
6027         }
6028 }
6029
6030 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6031                                 u32 rx_en, u32 tx_en)
6032 {
6033         struct hclge_vport *vport = hclge_get_vport(handle);
6034         struct hclge_dev *hdev = vport->back;
6035         struct phy_device *phydev = hdev->hw.mac.phydev;
6036         u32 fc_autoneg;
6037
6038         fc_autoneg = hclge_get_autoneg(handle);
6039         if (auto_neg != fc_autoneg) {
6040                 dev_info(&hdev->pdev->dev,
6041                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6042                 return -EOPNOTSUPP;
6043         }
6044
6045         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6046                 dev_info(&hdev->pdev->dev,
6047                          "Priority flow control enabled. Cannot set link flow control.\n");
6048                 return -EOPNOTSUPP;
6049         }
6050
6051         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6052
6053         if (!fc_autoneg)
6054                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6055
6056         /* Only support flow control negotiation for netdev with
6057          * phy attached for now.
6058          */
6059         if (!phydev)
6060                 return -EOPNOTSUPP;
6061
6062         return phy_start_aneg(phydev);
6063 }
6064
6065 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6066                                           u8 *auto_neg, u32 *speed, u8 *duplex)
6067 {
6068         struct hclge_vport *vport = hclge_get_vport(handle);
6069         struct hclge_dev *hdev = vport->back;
6070
6071         if (speed)
6072                 *speed = hdev->hw.mac.speed;
6073         if (duplex)
6074                 *duplex = hdev->hw.mac.duplex;
6075         if (auto_neg)
6076                 *auto_neg = hdev->hw.mac.autoneg;
6077 }
6078
6079 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6080 {
6081         struct hclge_vport *vport = hclge_get_vport(handle);
6082         struct hclge_dev *hdev = vport->back;
6083
6084         if (media_type)
6085                 *media_type = hdev->hw.mac.media_type;
6086 }
6087
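/* Note: read the MDI/MDI-X control and resolution status from the PHY by
 * temporarily switching to the MDIX register page, then map the raw
 * fields onto the ETH_TP_MDI* ethtool constants before restoring the
 * copper page.
 */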
6088 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6089                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
6090 {
6091         struct hclge_vport *vport = hclge_get_vport(handle);
6092         struct hclge_dev *hdev = vport->back;
6093         struct phy_device *phydev = hdev->hw.mac.phydev;
6094         int mdix_ctrl, mdix, retval, is_resolved;
6095
6096         if (!phydev) {
6097                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6098                 *tp_mdix = ETH_TP_MDI_INVALID;
6099                 return;
6100         }
6101
6102         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6103
6104         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6105         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6106                                     HCLGE_PHY_MDIX_CTRL_S);
6107
6108         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6109         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6110         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6111
6112         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
6113
6114         switch (mdix_ctrl) {
6115         case 0x0:
6116                 *tp_mdix_ctrl = ETH_TP_MDI;
6117                 break;
6118         case 0x1:
6119                 *tp_mdix_ctrl = ETH_TP_MDI_X;
6120                 break;
6121         case 0x3:
6122                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
6123                 break;
6124         default:
6125                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6126                 break;
6127         }
6128
6129         if (!is_resolved)
6130                 *tp_mdix = ETH_TP_MDI_INVALID;
6131         else if (mdix)
6132                 *tp_mdix = ETH_TP_MDI_X;
6133         else
6134                 *tp_mdix = ETH_TP_MDI;
6135 }
6136
6137 static int hclge_init_instance_hw(struct hclge_dev *hdev)
6138 {
6139         return hclge_mac_connect_phy(hdev);
6140 }
6141
6142 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
6143 {
6144         hclge_mac_disconnect_phy(hdev);
6145 }
6146
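/* Note: bind a registering client (KNIC/UNIC/RoCE) to every vport. The
 * RoCE instance is only initialized once both the NIC and RoCE clients
 * are present and the device advertises RoCE support; failures unwind by
 * clearing the corresponding client pointers.
 */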
6147 static int hclge_init_client_instance(struct hnae3_client *client,
6148                                       struct hnae3_ae_dev *ae_dev)
6149 {
6150         struct hclge_dev *hdev = ae_dev->priv;
6151         struct hclge_vport *vport;
6152         int i, ret;
6153
6154         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6155                 vport = &hdev->vport[i];
6156
6157                 switch (client->type) {
6158                 case HNAE3_CLIENT_KNIC:
6159
6160                         hdev->nic_client = client;
6161                         vport->nic.client = client;
6162                         ret = client->ops->init_instance(&vport->nic);
6163                         if (ret)
6164                                 goto clear_nic;
6165
6166                         ret = hclge_init_instance_hw(hdev);
6167                         if (ret) {
6168                                 client->ops->uninit_instance(&vport->nic,
6169                                                              0);
6170                                 goto clear_nic;
6171                         }
6172
6173                         hnae3_set_client_init_flag(client, ae_dev, 1);
6174
6175                         if (hdev->roce_client &&
6176                             hnae3_dev_roce_supported(hdev)) {
6177                                 struct hnae3_client *rc = hdev->roce_client;
6178
6179                                 ret = hclge_init_roce_base_info(vport);
6180                                 if (ret)
6181                                         goto clear_roce;
6182
6183                                 ret = rc->ops->init_instance(&vport->roce);
6184                                 if (ret)
6185                                         goto clear_roce;
6186
6187                                 hnae3_set_client_init_flag(hdev->roce_client,
6188                                                            ae_dev, 1);
6189                         }
6190
6191                         break;
6192                 case HNAE3_CLIENT_UNIC:
6193                         hdev->nic_client = client;
6194                         vport->nic.client = client;
6195
6196                         ret = client->ops->init_instance(&vport->nic);
6197                         if (ret)
6198                                 goto clear_nic;
6199
6200                         hnae3_set_client_init_flag(client, ae_dev, 1);
6201
6202                         break;
6203                 case HNAE3_CLIENT_ROCE:
6204                         if (hnae3_dev_roce_supported(hdev)) {
6205                                 hdev->roce_client = client;
6206                                 vport->roce.client = client;
6207                         }
6208
6209                         if (hdev->roce_client && hdev->nic_client) {
6210                                 ret = hclge_init_roce_base_info(vport);
6211                                 if (ret)
6212                                         goto clear_roce;
6213
6214                                 ret = client->ops->init_instance(&vport->roce);
6215                                 if (ret)
6216                                         goto clear_roce;
6217
6218                                 hnae3_set_client_init_flag(client, ae_dev, 1);
6219                         }
6220
6221                         break;
6222                 default:
6223                         return -EINVAL;
6224                 }
6225         }
6226
6227         return 0;
6228
6229 clear_nic:
6230         hdev->nic_client = NULL;
6231         vport->nic.client = NULL;
6232         return ret;
6233 clear_roce:
6234         hdev->roce_client = NULL;
6235         vport->roce.client = NULL;
6236         return ret;
6237 }
6238
6239 static void hclge_uninit_client_instance(struct hnae3_client *client,
6240                                          struct hnae3_ae_dev *ae_dev)
6241 {
6242         struct hclge_dev *hdev = ae_dev->priv;
6243         struct hclge_vport *vport;
6244         int i;
6245
6246         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6247                 vport = &hdev->vport[i];
6248                 if (hdev->roce_client) {
6249                         hdev->roce_client->ops->uninit_instance(&vport->roce,
6250                                                                 0);
6251                         hdev->roce_client = NULL;
6252                         vport->roce.client = NULL;
6253                 }
6254                 if (client->type == HNAE3_CLIENT_ROCE)
6255                         return;
6256                 if (hdev->nic_client && client->ops->uninit_instance) {
6257                         hclge_uninit_instance_hw(hdev);
6258                         client->ops->uninit_instance(&vport->nic, 0);
6259                         hdev->nic_client = NULL;
6260                         vport->nic.client = NULL;
6261                 }
6262         }
6263 }
6264
6265 static int hclge_pci_init(struct hclge_dev *hdev)
6266 {
6267         struct pci_dev *pdev = hdev->pdev;
6268         struct hclge_hw *hw;
6269         int ret;
6270
6271         ret = pci_enable_device(pdev);
6272         if (ret) {
6273                 dev_err(&pdev->dev, "failed to enable PCI device\n");
6274                 return ret;
6275         }
6276
6277         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6278         if (ret) {
6279                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6280                 if (ret) {
6281                         dev_err(&pdev->dev,
6282                                 "can't set consistent PCI DMA\n");
6283                         goto err_disable_device;
6284                 }
6285                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
6286         }
6287
6288         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
6289         if (ret) {
6290                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
6291                 goto err_disable_device;
6292         }
6293
6294         pci_set_master(pdev);
6295         hw = &hdev->hw;
6296         hw->io_base = pcim_iomap(pdev, 2, 0);
6297         if (!hw->io_base) {
6298                 dev_err(&pdev->dev, "Can't map configuration register space\n");
6299                 ret = -ENOMEM;
6300                 goto err_clr_master;
6301         }
6302
6303         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
6304
6305         return 0;
6306 err_clr_master:
6307         pci_clear_master(pdev);
6308         pci_release_regions(pdev);
6309 err_disable_device:
6310         pci_disable_device(pdev);
6311
6312         return ret;
6313 }
6314
6315 static void hclge_pci_uninit(struct hclge_dev *hdev)
6316 {
6317         struct pci_dev *pdev = hdev->pdev;
6318
6319         pcim_iounmap(pdev, hdev->hw.io_base);
6320         pci_free_irq_vectors(pdev);
6321         pci_clear_master(pdev);
6322         pci_release_mem_regions(pdev);
6323         pci_disable_device(pdev);
6324 }
6325
6326 static void hclge_state_init(struct hclge_dev *hdev)
6327 {
6328         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
6329         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6330         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
6331         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6332         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
6333         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
6334 }
6335
6336 static void hclge_state_uninit(struct hclge_dev *hdev)
6337 {
6338         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6339
6340         if (hdev->service_timer.function)
6341                 del_timer_sync(&hdev->service_timer);
6342         if (hdev->service_task.func)
6343                 cancel_work_sync(&hdev->service_task);
6344         if (hdev->rst_service_task.func)
6345                 cancel_work_sync(&hdev->rst_service_task);
6346         if (hdev->mbx_service_task.func)
6347                 cancel_work_sync(&hdev->mbx_service_task);
6348 }
6349
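/* Note: main PF initialization path. PCI/DMA setup, firmware command
 * queue and capability query, MSI-X and misc vector setup, TQP/vport
 * allocation and mapping, MAC/MDIO, TSO, VLAN, TM scheduler, RSS, manager
 * table and flow director config, then the service timer and work items.
 * Errors unwind through the labels at the end of the function.
 */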
6350 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
6351 {
6352         struct pci_dev *pdev = ae_dev->pdev;
6353         struct hclge_dev *hdev;
6354         int ret;
6355
6356         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
6357         if (!hdev) {
6358                 ret = -ENOMEM;
6359                 goto out;
6360         }
6361
6362         hdev->pdev = pdev;
6363         hdev->ae_dev = ae_dev;
6364         hdev->reset_type = HNAE3_NONE_RESET;
6365         ae_dev->priv = hdev;
6366
6367         ret = hclge_pci_init(hdev);
6368         if (ret) {
6369                 dev_err(&pdev->dev, "PCI init failed\n");
6370                 goto out;
6371         }
6372
6373         /* Initialize the firmware command queue */
6374         ret = hclge_cmd_queue_init(hdev);
6375         if (ret) {
6376                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
6377                 goto err_pci_uninit;
6378         }
6379
6380         /* Initialize the firmware command interface */
6381         ret = hclge_cmd_init(hdev);
6382         if (ret)
6383                 goto err_cmd_uninit;
6384
6385         ret = hclge_get_cap(hdev);
6386         if (ret) {
6387                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
6388                         ret);
6389                 goto err_cmd_uninit;
6390         }
6391
6392         ret = hclge_configure(hdev);
6393         if (ret) {
6394                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
6395                 goto err_cmd_uninit;
6396         }
6397
6398         ret = hclge_init_msi(hdev);
6399         if (ret) {
6400                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
6401                 goto err_cmd_uninit;
6402         }
6403
6404         ret = hclge_misc_irq_init(hdev);
6405         if (ret) {
6406                 dev_err(&pdev->dev,
6407                         "Misc IRQ(vector0) init error, ret = %d.\n",
6408                         ret);
6409                 goto err_msi_uninit;
6410         }
6411
6412         ret = hclge_alloc_tqps(hdev);
6413         if (ret) {
6414                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
6415                 goto err_msi_irq_uninit;
6416         }
6417
6418         ret = hclge_alloc_vport(hdev);
6419         if (ret) {
6420                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
6421                 goto err_msi_irq_uninit;
6422         }
6423
6424         ret = hclge_map_tqp(hdev);
6425         if (ret) {
6426                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
6427                 goto err_msi_irq_uninit;
6428         }
6429
6430         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
6431                 ret = hclge_mac_mdio_config(hdev);
6432                 if (ret) {
6433                         dev_err(&hdev->pdev->dev,
6434                                 "mdio config fail ret=%d\n", ret);
6435                         goto err_msi_irq_uninit;
6436                 }
6437         }
6438
6439         ret = hclge_mac_init(hdev);
6440         if (ret) {
6441                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
6442                 goto err_mdiobus_unreg;
6443         }
6444
6445         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
6446         if (ret) {
6447                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
6448                 goto err_mdiobus_unreg;
6449         }
6450
6451         ret = hclge_init_vlan_config(hdev);
6452         if (ret) {
6453                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
6454                 goto err_mdiobus_unreg;
6455         }
6456
6457         ret = hclge_tm_schd_init(hdev);
6458         if (ret) {
6459                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
6460                 goto err_mdiobus_unreg;
6461         }
6462
6463         hclge_rss_init_cfg(hdev);
6464         ret = hclge_rss_init_hw(hdev);
6465         if (ret) {
6466                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
6467                 goto err_mdiobus_unreg;
6468         }
6469
6470         ret = init_mgr_tbl(hdev);
6471         if (ret) {
6472                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
6473                 goto err_mdiobus_unreg;
6474         }
6475
6476         ret = hclge_init_fd_config(hdev);
6477         if (ret) {
6478                 dev_err(&pdev->dev,
6479                         "fd table init fail, ret=%d\n", ret);
6480                 goto err_mdiobus_unreg;
6481         }
6482
6483         hclge_dcb_ops_set(hdev);
6484
6485         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
6486         INIT_WORK(&hdev->service_task, hclge_service_task);
6487         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
6488         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
6489
6490         hclge_clear_all_event_cause(hdev);
6491
6492         /* Enable MISC vector(vector0) */
6493         hclge_enable_vector(&hdev->misc_vector, true);
6494
6495         hclge_state_init(hdev);
6496
6497         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
6498         return 0;
6499
6500 err_mdiobus_unreg:
6501         if (hdev->hw.mac.phydev)
6502                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
6503 err_msi_irq_uninit:
6504         hclge_misc_irq_uninit(hdev);
6505 err_msi_uninit:
6506         pci_free_irq_vectors(pdev);
6507 err_cmd_uninit:
6508         hclge_destroy_cmd_queue(&hdev->hw);
6509 err_pci_uninit:
6510         pcim_iounmap(pdev, hdev->hw.io_base);
6511         pci_clear_master(pdev);
6512         pci_release_regions(pdev);
6513         pci_disable_device(pdev);
6514 out:
6515         return ret;
6516 }
6517
6518 static void hclge_stats_clear(struct hclge_dev *hdev)
6519 {
6520         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
6521 }
6522
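/* Note: re-initialization path run after a reset. Clears the cached stats
 * and VLAN table, then redoes command init, capability query, device
 * configuration, TQP mapping, MAC, TSO, VLAN, TM, RSS and flow director
 * setup without reallocating resources.
 */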
6523 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
6524 {
6525         struct hclge_dev *hdev = ae_dev->priv;
6526         struct pci_dev *pdev = ae_dev->pdev;
6527         int ret;
6528
6529         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6530
6531         hclge_stats_clear(hdev);
6532         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
6533
6534         ret = hclge_cmd_init(hdev);
6535         if (ret) {
6536                 dev_err(&pdev->dev, "Cmd queue init failed\n");
6537                 return ret;
6538         }
6539
6540         ret = hclge_get_cap(hdev);
6541         if (ret) {
6542                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
6543                         ret);
6544                 return ret;
6545         }
6546
6547         ret = hclge_configure(hdev);
6548         if (ret) {
6549                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
6550                 return ret;
6551         }
6552
6553         ret = hclge_map_tqp(hdev);
6554         if (ret) {
6555                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
6556                 return ret;
6557         }
6558
6559         ret = hclge_mac_init(hdev);
6560         if (ret) {
6561                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
6562                 return ret;
6563         }
6564
6565         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
6566         if (ret) {
6567                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
6568                 return ret;
6569         }
6570
6571         ret = hclge_init_vlan_config(hdev);
6572         if (ret) {
6573                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
6574                 return ret;
6575         }
6576
6577         ret = hclge_tm_init_hw(hdev);
6578         if (ret) {
6579                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
6580                 return ret;
6581         }
6582
6583         ret = hclge_rss_init_hw(hdev);
6584         if (ret) {
6585                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
6586                 return ret;
6587         }
6588
6589         ret = hclge_init_fd_config(hdev);
6590         if (ret) {
6591                 dev_err(&pdev->dev,
6592                         "fd table init fail, ret=%d\n", ret);
6593                 return ret;
6594         }
6595
6596         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
6597                  HCLGE_DRIVER_NAME);
6598
6599         return 0;
6600 }
6601
6602 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
6603 {
6604         struct hclge_dev *hdev = ae_dev->priv;
6605         struct hclge_mac *mac = &hdev->hw.mac;
6606
6607         hclge_state_uninit(hdev);
6608
6609         if (mac->phydev)
6610                 mdiobus_unregister(mac->mdio_bus);
6611
6612         /* Disable MISC vector(vector0) */
6613         hclge_enable_vector(&hdev->misc_vector, false);
6614         synchronize_irq(hdev->misc_vector.vector_irq);
6615
6616         hclge_destroy_cmd_queue(&hdev->hw);
6617         hclge_misc_irq_uninit(hdev);
6618         hclge_pci_uninit(hdev);
6619         ae_dev->priv = NULL;
6620 }
6621
6622 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
6623 {
6624         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
6625         struct hclge_vport *vport = hclge_get_vport(handle);
6626         struct hclge_dev *hdev = vport->back;
6627
6628         return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
6629 }
6630
6631 static void hclge_get_channels(struct hnae3_handle *handle,
6632                                struct ethtool_channels *ch)
6633 {
6634         struct hclge_vport *vport = hclge_get_vport(handle);
6635
6636         ch->max_combined = hclge_get_max_channels(handle);
6637         ch->other_count = 1;
6638         ch->max_other = 1;
6639         ch->combined_count = vport->alloc_tqps;
6640 }
6641
6642 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
6643                                         u16 *alloc_tqps, u16 *max_rss_size)
6644 {
6645         struct hclge_vport *vport = hclge_get_vport(handle);
6646         struct hclge_dev *hdev = vport->back;
6647
6648         *alloc_tqps = vport->alloc_tqps;
6649         *max_rss_size = hdev->rss_size_max;
6650 }
6651
6652 static void hclge_release_tqp(struct hclge_vport *vport)
6653 {
6654         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6655         struct hclge_dev *hdev = vport->back;
6656         int i;
6657
6658         for (i = 0; i < kinfo->num_tqps; i++) {
6659                 struct hclge_tqp *tqp =
6660                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
6661
6662                 tqp->q.handle = NULL;
6663                 tqp->q.tqp_index = 0;
6664                 tqp->alloced = false;
6665         }
6666
6667         devm_kfree(&hdev->pdev->dev, kinfo->tqp);
6668         kinfo->tqp = NULL;
6669 }
6670
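/* Note: change the number of combined channels. Drop the old TQPs, redo
 * the knic setup and vport/TQP mapping for the new count, rebuild the TM
 * scheduler, and reprogram the RSS TC mode plus a fresh round-robin
 * indirection table sized to the new rss_size.
 */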
6671 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
6672 {
6673         struct hclge_vport *vport = hclge_get_vport(handle);
6674         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6675         struct hclge_dev *hdev = vport->back;
6676         int cur_rss_size = kinfo->rss_size;
6677         int cur_tqps = kinfo->num_tqps;
6678         u16 tc_offset[HCLGE_MAX_TC_NUM];
6679         u16 tc_valid[HCLGE_MAX_TC_NUM];
6680         u16 tc_size[HCLGE_MAX_TC_NUM];
6681         u16 roundup_size;
6682         u32 *rss_indir;
6683         int ret, i;
6684
6685         /* Free the old tqps, then reallocate them with the new tqp number during nic setup */
6686         hclge_release_tqp(vport);
6687
6688         ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
6689         if (ret) {
6690                 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
6691                 return ret;
6692         }
6693
6694         ret = hclge_map_tqp_to_vport(hdev, vport);
6695         if (ret) {
6696                 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
6697                 return ret;
6698         }
6699
6700         ret = hclge_tm_schd_init(hdev);
6701         if (ret) {
6702                 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
6703                 return ret;
6704         }
6705
6706         roundup_size = roundup_pow_of_two(kinfo->rss_size);
6707         roundup_size = ilog2(roundup_size);
6708         /* Set the RSS TC mode according to the new RSS size */
6709         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
6710                 tc_valid[i] = 0;
6711
6712                 if (!(hdev->hw_tc_map & BIT(i)))
6713                         continue;
6714
6715                 tc_valid[i] = 1;
6716                 tc_size[i] = roundup_size;
6717                 tc_offset[i] = kinfo->rss_size * i;
6718         }
6719         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
6720         if (ret)
6721                 return ret;
6722
6723         /* Reinitializes the rss indirect table according to the new RSS size */
6724         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
6725         if (!rss_indir)
6726                 return -ENOMEM;
6727
6728         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
6729                 rss_indir[i] = i % kinfo->rss_size;
6730
6731         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
6732         if (ret)
6733                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
6734                         ret);
6735
6736         kfree(rss_indir);
6737
6738         if (!ret)
6739                 dev_info(&hdev->pdev->dev,
6740                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
6741                          cur_rss_size, kinfo->rss_size,
6742                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
6743
6744         return ret;
6745 }
6746
6747 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
6748                               u32 *regs_num_64_bit)
6749 {
6750         struct hclge_desc desc;
6751         u32 total_num;
6752         int ret;
6753
6754         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
6755         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6756         if (ret) {
6757                 dev_err(&hdev->pdev->dev,
6758                         "Query register number cmd failed, ret = %d.\n", ret);
6759                 return ret;
6760         }
6761
6762         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
6763         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
6764
6765         total_num = *regs_num_32_bit + *regs_num_64_bit;
6766         if (!total_num)
6767                 return -EINVAL;
6768
6769         return 0;
6770 }
6771
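/* Note: dump the 32-bit register space. Register values come back packed
 * in the command descriptors: the first descriptor carries two fewer
 * values (hence the "+ 2" in cmd_num and the "- 2" data count for i == 0),
 * while each following descriptor is consumed in full.
 */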
6772 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
6773                                  void *data)
6774 {
6775 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
6776
6777         struct hclge_desc *desc;
6778         u32 *reg_val = data;
6779         __le32 *desc_data;
6780         int cmd_num;
6781         int i, k, n;
6782         int ret;
6783
6784         if (regs_num == 0)
6785                 return 0;
6786
6787         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
6788         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
6789         if (!desc)
6790                 return -ENOMEM;
6791
6792         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
6793         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
6794         if (ret) {
6795                 dev_err(&hdev->pdev->dev,
6796                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
6797                 kfree(desc);
6798                 return ret;
6799         }
6800
6801         for (i = 0; i < cmd_num; i++) {
6802                 if (i == 0) {
6803                         desc_data = (__le32 *)(&desc[i].data[0]);
6804                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
6805                 } else {
6806                         desc_data = (__le32 *)(&desc[i]);
6807                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
6808                 }
6809                 for (k = 0; k < n; k++) {
6810                         *reg_val++ = le32_to_cpu(*desc_data++);
6811
6812                         regs_num--;
6813                         if (!regs_num)
6814                                 break;
6815                 }
6816         }
6817
6818         kfree(desc);
6819         return 0;
6820 }
6821
6822 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
6823                                  void *data)
6824 {
6825 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
6826
6827         struct hclge_desc *desc;
6828         u64 *reg_val = data;
6829         __le64 *desc_data;
6830         int cmd_num;
6831         int i, k, n;
6832         int ret;
6833
6834         if (regs_num == 0)
6835                 return 0;
6836
6837         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
6838         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
6839         if (!desc)
6840                 return -ENOMEM;
6841
6842         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
6843         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
6844         if (ret) {
6845                 dev_err(&hdev->pdev->dev,
6846                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
6847                 kfree(desc);
6848                 return ret;
6849         }
6850
6851         for (i = 0; i < cmd_num; i++) {
6852                 if (i == 0) {
6853                         desc_data = (__le64 *)(&desc[i].data[0]);
6854                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
6855                 } else {
6856                         desc_data = (__le64 *)(&desc[i]);
6857                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
6858                 }
6859                 for (k = 0; k < n; k++) {
6860                         *reg_val++ = le64_to_cpu(*desc_data++);
6861
6862                         regs_num--;
6863                         if (!regs_num)
6864                                 break;
6865                 }
6866         }
6867
6868         kfree(desc);
6869         return 0;
6870 }
6871
6872 static int hclge_get_regs_len(struct hnae3_handle *handle)
6873 {
6874         struct hclge_vport *vport = hclge_get_vport(handle);
6875         struct hclge_dev *hdev = vport->back;
6876         u32 regs_num_32_bit, regs_num_64_bit;
6877         int ret;
6878
6879         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
6880         if (ret) {
6881                 dev_err(&hdev->pdev->dev,
6882                         "Get register number failed, ret = %d.\n", ret);
6883                 return -EOPNOTSUPP;
6884         }
6885
6886         return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
6887 }
6888
6889 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
6890                            void *data)
6891 {
6892         struct hclge_vport *vport = hclge_get_vport(handle);
6893         struct hclge_dev *hdev = vport->back;
6894         u32 regs_num_32_bit, regs_num_64_bit;
6895         int ret;
6896
6897         *version = hdev->fw_version;
6898
6899         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
6900         if (ret) {
6901                 dev_err(&hdev->pdev->dev,
6902                         "Get register number failed, ret = %d.\n", ret);
6903                 return;
6904         }
6905
6906         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
6907         if (ret) {
6908                 dev_err(&hdev->pdev->dev,
6909                         "Get 32 bit register failed, ret = %d.\n", ret);
6910                 return;
6911         }
6912
6913         data = (u32 *)data + regs_num_32_bit;
6914         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
6915                                     data);
6916         if (ret)
6917                 dev_err(&hdev->pdev->dev,
6918                         "Get 64 bit register failed, ret = %d.\n", ret);
6919 }
6920
6921 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
6922 {
6923         struct hclge_set_led_state_cmd *req;
6924         struct hclge_desc desc;
6925         int ret;
6926
6927         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
6928
6929         req = (struct hclge_set_led_state_cmd *)desc.data;
6930         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
6931                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
6932
6933         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6934         if (ret)
6935                 dev_err(&hdev->pdev->dev,
6936                         "Send set led state cmd error, ret =%d\n", ret);
6937
6938         return ret;
6939 }
6940
6941 enum hclge_led_status {
6942         HCLGE_LED_OFF,
6943         HCLGE_LED_ON,
6944         HCLGE_LED_NO_CHANGE = 0xFF,
6945 };
6946
6947 static int hclge_set_led_id(struct hnae3_handle *handle,
6948                             enum ethtool_phys_id_state status)
6949 {
6950         struct hclge_vport *vport = hclge_get_vport(handle);
6951         struct hclge_dev *hdev = vport->back;
6952
6953         switch (status) {
6954         case ETHTOOL_ID_ACTIVE:
6955                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
6956         case ETHTOOL_ID_INACTIVE:
6957                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
6958         default:
6959                 return -EINVAL;
6960         }
6961 }
6962
6963 static void hclge_get_link_mode(struct hnae3_handle *handle,
6964                                 unsigned long *supported,
6965                                 unsigned long *advertising)
6966 {
6967         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
6968         struct hclge_vport *vport = hclge_get_vport(handle);
6969         struct hclge_dev *hdev = vport->back;
6970         unsigned int idx = 0;
6971
6972         for (; idx < size; idx++) {
6973                 supported[idx] = hdev->hw.mac.supported[idx];
6974                 advertising[idx] = hdev->hw.mac.advertising[idx];
6975         }
6976 }
6977
6978 static const struct hnae3_ae_ops hclge_ops = {
6979         .init_ae_dev = hclge_init_ae_dev,
6980         .uninit_ae_dev = hclge_uninit_ae_dev,
6981         .init_client_instance = hclge_init_client_instance,
6982         .uninit_client_instance = hclge_uninit_client_instance,
6983         .map_ring_to_vector = hclge_map_ring_to_vector,
6984         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
6985         .get_vector = hclge_get_vector,
6986         .put_vector = hclge_put_vector,
6987         .set_promisc_mode = hclge_set_promisc_mode,
6988         .set_loopback = hclge_set_loopback,
6989         .start = hclge_ae_start,
6990         .stop = hclge_ae_stop,
6991         .get_status = hclge_get_status,
6992         .get_ksettings_an_result = hclge_get_ksettings_an_result,
6993         .update_speed_duplex_h = hclge_update_speed_duplex_h,
6994         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
6995         .get_media_type = hclge_get_media_type,
6996         .get_rss_key_size = hclge_get_rss_key_size,
6997         .get_rss_indir_size = hclge_get_rss_indir_size,
6998         .get_rss = hclge_get_rss,
6999         .set_rss = hclge_set_rss,
7000         .set_rss_tuple = hclge_set_rss_tuple,
7001         .get_rss_tuple = hclge_get_rss_tuple,
7002         .get_tc_size = hclge_get_tc_size,
7003         .get_mac_addr = hclge_get_mac_addr,
7004         .set_mac_addr = hclge_set_mac_addr,
7005         .do_ioctl = hclge_do_ioctl,
7006         .add_uc_addr = hclge_add_uc_addr,
7007         .rm_uc_addr = hclge_rm_uc_addr,
7008         .add_mc_addr = hclge_add_mc_addr,
7009         .rm_mc_addr = hclge_rm_mc_addr,
7010         .update_mta_status = hclge_update_mta_status,
7011         .set_autoneg = hclge_set_autoneg,
7012         .get_autoneg = hclge_get_autoneg,
7013         .get_pauseparam = hclge_get_pauseparam,
7014         .set_pauseparam = hclge_set_pauseparam,
7015         .set_mtu = hclge_set_mtu,
7016         .reset_queue = hclge_reset_tqp,
7017         .get_stats = hclge_get_stats,
7018         .update_stats = hclge_update_stats,
7019         .get_strings = hclge_get_strings,
7020         .get_sset_count = hclge_get_sset_count,
7021         .get_fw_version = hclge_get_fw_version,
7022         .get_mdix_mode = hclge_get_mdix_mode,
7023         .enable_vlan_filter = hclge_enable_vlan_filter,
7024         .set_vlan_filter = hclge_set_vlan_filter,
7025         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
7026         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
7027         .reset_event = hclge_reset_event,
7028         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
7029         .set_channels = hclge_set_channels,
7030         .get_channels = hclge_get_channels,
7031         .get_regs_len = hclge_get_regs_len,
7032         .get_regs = hclge_get_regs,
7033         .set_led_id = hclge_set_led_id,
7034         .get_link_mode = hclge_get_link_mode,
7035         .add_fd_entry = hclge_add_fd_entry,
7036         .del_fd_entry = hclge_del_fd_entry,
7037 };
7038
7039 static struct hnae3_ae_algo ae_algo = {
7040         .ops = &hclge_ops,
7041         .pdev_id_table = ae_algo_pci_tbl,
7042 };
7043
7044 static int hclge_init(void)
7045 {
7046         pr_info("%s is initializing\n", HCLGE_NAME);
7047
7048         hnae3_register_ae_algo(&ae_algo);
7049
7050         return 0;
7051 }
7052
7053 static void hclge_exit(void)
7054 {
7055         hnae3_unregister_ae_algo(&ae_algo);
7056 }
7057 module_init(hclge_init);
7058 module_exit(hclge_exit);
7059
7060 MODULE_LICENSE("GPL");
7061 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
7062 MODULE_DESCRIPTION("HCLGE Driver");
7063 MODULE_VERSION(HCLGE_MOD_VERSION);