// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
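
/* A minimal usage sketch for the two macros above: with the MAC stats
 * block as the base pointer,
 *	u64 val = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 * reads one counter through its byte offset, which is exactly how the
 * g_mac_stats_string table below is consumed by hclge_comm_get_stats().
 */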

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
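
/* The single static entry below matches the LLDP nearest-bridge multicast
 * address 01:80:c2:00:00:0e (split into the hi32/lo16 fields) together
 * with the LLDP ethertype, presumably so LLDP frames can be picked up by
 * the management path rather than dropped by normal filtering.
 */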
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
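
/* A sketch of the layout consumed below (an assumption from the loop
 * structure, not a documented format): the MAC counters come back in a
 * chain of HCLGE_MAC_CMD_NUM descriptors. The first descriptor still
 * carries command fields, so only HCLGE_RTN_DATA_NUM - 2 u64 words of its
 * data area hold counters; every later descriptor is reinterpreted whole
 * as HCLGE_RTN_DATA_NUM u64 words.
 */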
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
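
/* Note the copy-out order below: all TX ring counters first, then all RX
 * ring counters. hclge_tqps_get_strings() emits its names in the same
 * order, so ethtool pairs each value with the right "txq%d"/"rxq%d" label.
 */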
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* one TX and one RX packet counter per queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
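
/* Fold the raw MAC counters into the generic net_device_stats view:
 * oversize, undersize and FCS errors roll up into rx_errors, FCS errors
 * also feed rx_crc_errors, and rx_length_errors combines the under- and
 * over-size counters.
 */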
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
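
/* Firmware speed codes, as also written out by
 * hclge_cfg_mac_speed_dup_hw() further down:
 * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
 */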
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}
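
/* A sketch of the layout assumed by the parser below: the static config
 * arrives as packed 32-bit words spread across the descriptors fetched by
 * hclge_get_cfg(); desc[0] carries vport/TC/queue/PHY/MAC/buffer fields
 * and desc[1] carries the NUMA node map and the speed-ability bits.
 */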
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length is expressed in units of 4 bytes when
		 * sent to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently non-contiguous TC maps are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
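
/* A note on the field masks below: the MSS min and max values are written
 * into two separate 16-bit words and, assuming the shared layout this
 * implies, the value field sits at the same bit offset in each word, which
 * is why HCLGE_TSO_MSS_MIN_M/_S are reused for both writes.
 */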
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
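
/* Sanity-check the RX buffer split: per the formula below, the shared
 * pool must cover at least 2 * MPS plus the delay value, and one MPS for
 * each PFC-enabled TC, half an MPS for every other enabled TC, plus one
 * spare MPS. On success the shared-buffer size and per-TC thresholds are
 * filled into buf_alloc->s_buf.
 */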
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
#define HCLGE_BUF_SIZE_UNIT	128
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high = priv->wl.low + aligned_mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);

	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M support half duplex */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				    HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);

	return ret;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
					   bool mask_vlan,
					   u8 *mac_mask)
{
	struct hclge_mac_vlan_mask_entry_cmd *req;
	struct hclge_desc desc;
	int status;

	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);

	hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
		      mask_vlan ? 1 : 0);
	ether_addr_copy(req->mac_mask, mac_mask);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
			status);

	return status;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct net_device *netdev = handle->kinfo.netdev;
	struct hclge_mac *mac = &hdev->hw.mac;
	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct hclge_vport *vport;
	int mtu;
	int ret;
	int i;

	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->accept_mta_mc = false;

		memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
		ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set mta filter mode fail ret=%d\n", ret);
			return ret;
		}
	}

	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set default mac_vlan_mask fail ret=%d\n", ret);
		return ret;
	}

	if (netdev)
		mtu = netdev->mtu;
	else
		mtu = ETH_DATA_LEN;

	ret = hclge_set_mtu(handle, mtu);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mtu failed ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	int speed;
	u8 duplex;
	int ret;

	/* get the speed and duplex as the autoneg result from the MAC cmd
	 * only when no PHY exists and autoneg is enabled
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events.*/
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by a reset */
	if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}
2301 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2303 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2304 dev_warn(&hdev->pdev->dev,
2305 "vector(vector_id %d) has been freed.\n", vector_id);
2309 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2310 hdev->num_msi_left += 1;
2311 hdev->num_msi_used -= 1;
2314 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2316 struct hclge_misc_vector *vector = &hdev->misc_vector;
2318 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2320 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2321 hdev->vector_status[0] = 0;
2323 hdev->num_msi_left -= 1;
2324 hdev->num_msi_used += 1;
2327 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2331 hclge_get_misc_vector(hdev);
2333 /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
2334 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2335 0, "hclge_misc", hdev);
2337 hclge_free_vector(hdev, 0);
2338 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2339 hdev->misc_vector.vector_irq);
2345 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2347 free_irq(hdev->misc_vector.vector_irq, hdev);
2348 hclge_free_vector(hdev, 0);
2351 static int hclge_notify_client(struct hclge_dev *hdev,
2352 enum hnae3_reset_notify_type type)
2354 struct hnae3_client *client = hdev->nic_client;
2357 if (!client->ops->reset_notify)
2360 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2361 struct hnae3_handle *handle = &hdev->vport[i].nic;
2364 ret = client->ops->reset_notify(handle, type);
2372 static int hclge_reset_wait(struct hclge_dev *hdev)
2374 #define HCLGE_RESET_WAIT_MS 100
2375 #define HCLGE_RESET_WAIT_CNT 5
2376 u32 val, reg, reg_bit;
2379 switch (hdev->reset_type) {
2380 case HNAE3_GLOBAL_RESET:
2381 reg = HCLGE_GLOBAL_RESET_REG;
2382 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2384 case HNAE3_CORE_RESET:
2385 reg = HCLGE_GLOBAL_RESET_REG;
2386 reg_bit = HCLGE_CORE_RESET_BIT;
2388 case HNAE3_FUNC_RESET:
2389 reg = HCLGE_FUN_RST_ING;
2390 reg_bit = HCLGE_FUN_RST_ING_B;
2393 dev_err(&hdev->pdev->dev,
2394 "Wait for unsupported reset type: %d\n",
2399 val = hclge_read_dev(&hdev->hw, reg);
2400 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2401 msleep(HCLGE_RESET_WAIT_MS);
2402 val = hclge_read_dev(&hdev->hw, reg);
2406 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2407 dev_warn(&hdev->pdev->dev,
2408 "Wait for reset timeout: %d\n", hdev->reset_type);
2415 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2417 struct hclge_desc desc;
2418 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2421 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2422 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2423 req->fun_reset_vfid = func_id;
2425 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2427 dev_err(&hdev->pdev->dev,
2428 "send function reset cmd fail, status =%d\n", ret);
2433 static void hclge_do_reset(struct hclge_dev *hdev)
2435 struct pci_dev *pdev = hdev->pdev;
2438 switch (hdev->reset_type) {
2439 case HNAE3_GLOBAL_RESET:
2440 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2441 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2442 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2443 dev_info(&pdev->dev, "Global Reset requested\n");
2445 case HNAE3_CORE_RESET:
2446 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2447 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2448 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2449 dev_info(&pdev->dev, "Core Reset requested\n");
2451 case HNAE3_FUNC_RESET:
2452 dev_info(&pdev->dev, "PF Reset requested\n");
2453 hclge_func_reset_cmd(hdev, 0);
2454 /* schedule again to check later */
2455 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2456 hclge_reset_task_schedule(hdev);
2459 dev_warn(&pdev->dev,
2460 "Unsupported reset type: %d\n", hdev->reset_type);
2465 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2466 unsigned long *addr)
2468 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2470 /* return the highest priority reset level amongst all */
2471 if (test_bit(HNAE3_GLOBAL_RESET, addr))
2472 rst_level = HNAE3_GLOBAL_RESET;
2473 else if (test_bit(HNAE3_CORE_RESET, addr))
2474 rst_level = HNAE3_CORE_RESET;
2475 else if (test_bit(HNAE3_IMP_RESET, addr))
2476 rst_level = HNAE3_IMP_RESET;
2477 else if (test_bit(HNAE3_FUNC_RESET, addr))
2478 rst_level = HNAE3_FUNC_RESET;
2480 /* now, clear all other resets */
2481 clear_bit(HNAE3_GLOBAL_RESET, addr);
2482 clear_bit(HNAE3_CORE_RESET, addr);
2483 clear_bit(HNAE3_IMP_RESET, addr);
2484 clear_bit(HNAE3_FUNC_RESET, addr);
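/* Illustrative example: if both HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET
 * are pending in *addr, HNAE3_GLOBAL_RESET is returned and both bits
 * are cleared, so the lower-priority function reset is folded into the
 * global reset that is about to be handled.
 */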
2489 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2493 switch (hdev->reset_type) {
2494 case HNAE3_IMP_RESET:
2495 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2497 case HNAE3_GLOBAL_RESET:
2498 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2500 case HNAE3_CORE_RESET:
2501 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2510 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2511 hclge_enable_vector(&hdev->misc_vector, true);
2514 static void hclge_reset(struct hclge_dev *hdev)
2516 struct hnae3_handle *handle;
2518 /* perform reset of the stack & ae device for a client */
2519 handle = &hdev->vport[0].nic;
2521 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2523 if (!hclge_reset_wait(hdev)) {
2524 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2525 hclge_reset_ae_dev(hdev->ae_dev);
2526 hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2528 hclge_clear_reset_cause(hdev);
2530 /* schedule again to check pending resets later */
2531 set_bit(hdev->reset_type, &hdev->reset_pending);
2532 hclge_reset_task_schedule(hdev);
2535 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2536 handle->last_reset_time = jiffies;
2540 static void hclge_reset_event(struct hnae3_handle *handle)
2542 struct hclge_vport *vport = hclge_get_vport(handle);
2543 struct hclge_dev *hdev = vport->back;
2545 /* check if this is a new reset request and we are not here just because
2546 * the last reset attempt did not succeed and the watchdog hit us again.
2547 * We will know it is new if the last reset request did not occur very
2548 * recently (watchdog timer = 5*HZ, so check after a sufficiently large
2549 * time, say 4*5*HZ). In case of a new request we reset the "reset
2550 * level" to PF reset. And if it is a repeat of the most recent request
2551 * then we want to make sure we throttle it. Therefore, we will not
2552 * allow it again within 3*HZ of the previous one.
2554 if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
2556 else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
2557 handle->reset_level = HNAE3_FUNC_RESET;
2559 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
2560 handle->reset_level);
2562 /* request reset & schedule reset task */
2563 set_bit(handle->reset_level, &hdev->reset_request);
2564 hclge_reset_task_schedule(hdev);
2566 if (handle->reset_level < HNAE3_GLOBAL_RESET)
2567 handle->reset_level++;
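/* Timing sketch (illustrative): with the 3*HZ throttle and 4*5*HZ "new
 * request" window above, a request arriving within 3 s of the last
 * reset is throttled, one arriving after ~20 s starts over at
 * HNAE3_FUNC_RESET, and anything in between escalates reset_level one
 * step per request, capped at HNAE3_GLOBAL_RESET.
 */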
2570 static void hclge_reset_subtask(struct hclge_dev *hdev)
2572 /* check if there is any ongoing reset in the hardware. This status can
2573 * be checked from reset_pending. If there is one, we need to wait for
2574 * the hardware to complete the reset.
2575 * a. If we are able to figure out in reasonable time that the hardware
2576 * has fully reset then we can proceed with the driver, client
2578 * b. else, we can come back later to check this status and re-schedule
2581 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2582 if (hdev->reset_type != HNAE3_NONE_RESET)
2585 /* check if we got any *new* reset requests to be honored */
2586 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2587 if (hdev->reset_type != HNAE3_NONE_RESET)
2588 hclge_do_reset(hdev);
2590 hdev->reset_type = HNAE3_NONE_RESET;
2593 static void hclge_reset_service_task(struct work_struct *work)
2595 struct hclge_dev *hdev =
2596 container_of(work, struct hclge_dev, rst_service_task);
2598 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2601 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2603 hclge_reset_subtask(hdev);
2605 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2608 static void hclge_mailbox_service_task(struct work_struct *work)
2610 struct hclge_dev *hdev =
2611 container_of(work, struct hclge_dev, mbx_service_task);
2613 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2616 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2618 hclge_mbx_handler(hdev);
2620 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2623 static void hclge_service_task(struct work_struct *work)
2625 struct hclge_dev *hdev =
2626 container_of(work, struct hclge_dev, service_task);
2628 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2629 hclge_update_stats_for_all(hdev);
2630 hdev->hw_stats.stats_timer = 0;
2633 hclge_update_speed_duplex(hdev);
2634 hclge_update_link_status(hdev);
2635 hclge_service_complete(hdev);
2638 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2640 /* VF handle has no client */
2641 if (!handle->client)
2642 return container_of(handle, struct hclge_vport, nic);
2643 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2644 return container_of(handle, struct hclge_vport, roce);
2646 return container_of(handle, struct hclge_vport, nic);
2649 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2650 struct hnae3_vector_info *vector_info)
2652 struct hclge_vport *vport = hclge_get_vport(handle);
2653 struct hnae3_vector_info *vector = vector_info;
2654 struct hclge_dev *hdev = vport->back;
2658 vector_num = min(hdev->num_msi_left, vector_num);
2660 for (j = 0; j < vector_num; j++) {
2661 for (i = 1; i < hdev->num_msi; i++) {
2662 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2663 vector->vector = pci_irq_vector(hdev->pdev, i);
2664 vector->io_addr = hdev->hw.io_base +
2665 HCLGE_VECTOR_REG_BASE +
2666 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2668 HCLGE_VECTOR_VF_OFFSET;
2669 hdev->vector_status[i] = vport->vport_id;
2670 hdev->vector_irq[i] = vector->vector;
2679 hdev->num_msi_left -= alloc;
2680 hdev->num_msi_used += alloc;
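/* Note: the search above starts at entry 1 because MSI-X vector 0 is
 * reserved for the misc (reset/mailbox) interrupt claimed by
 * hclge_get_misc_vector(), so client vectors always occupy entries
 * 1..num_msi-1, with register addresses laid out at an
 * (i - 1) * HCLGE_VECTOR_REG_OFFSET stride from HCLGE_VECTOR_REG_BASE.
 */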
2685 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2689 for (i = 0; i < hdev->num_msi; i++)
2690 if (vector == hdev->vector_irq[i])
2696 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
2698 struct hclge_vport *vport = hclge_get_vport(handle);
2699 struct hclge_dev *hdev = vport->back;
2702 vector_id = hclge_get_vector_index(hdev, vector);
2703 if (vector_id < 0) {
2704 dev_err(&hdev->pdev->dev,
2705 "Get vector index fail. vector_id =%d\n", vector_id);
2709 hclge_free_vector(hdev, vector_id);
2714 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2716 return HCLGE_RSS_KEY_SIZE;
2719 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2721 return HCLGE_RSS_IND_TBL_SIZE;
2724 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2725 const u8 hfunc, const u8 *key)
2727 struct hclge_rss_config_cmd *req;
2728 struct hclge_desc desc;
2733 req = (struct hclge_rss_config_cmd *)desc.data;
2735 for (key_offset = 0; key_offset < 3; key_offset++) {
2736 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2739 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2740 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2742 if (key_offset == 2)
2744 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2746 key_size = HCLGE_RSS_HASH_KEY_NUM;
2748 memcpy(req->hash_key,
2749 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
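/* Illustrative split, assuming HCLGE_RSS_KEY_SIZE = 40 and
 * HCLGE_RSS_HASH_KEY_NUM = 16: descriptors 0 and 1 each carry 16 key
 * bytes and descriptor 2 carries the remaining 40 - 2 * 16 = 8 bytes,
 * with the segment index encoded into hash_config via
 * HCLGE_RSS_HASH_KEY_OFFSET_B.
 */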
2751 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2753 dev_err(&hdev->pdev->dev,
2754 "Configure RSS config fail, status = %d\n",
2762 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
2764 struct hclge_rss_indirection_table_cmd *req;
2765 struct hclge_desc desc;
2769 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
2771 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2772 hclge_cmd_setup_basic_desc
2773 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2775 req->start_table_index =
2776 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2777 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
2779 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2780 req->rss_result[j] =
2781 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2783 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2785 dev_err(&hdev->pdev->dev,
2786 "Configure rss indir table fail,status = %d\n",
2794 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2795 u16 *tc_size, u16 *tc_offset)
2797 struct hclge_rss_tc_mode_cmd *req;
2798 struct hclge_desc desc;
2802 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2803 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
2805 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2808 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2809 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
2810 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2811 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
2812 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2814 req->rss_tc_mode[i] = cpu_to_le16(mode);
2817 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2819 dev_err(&hdev->pdev->dev,
2820 "Configure rss tc mode fail, status = %d\n", ret);
2825 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2827 struct hclge_rss_input_tuple_cmd *req;
2828 struct hclge_desc desc;
2831 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2833 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2835 /* Get the tuple cfg from the PF */
2836 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
2837 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
2838 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
2839 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
2840 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
2841 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
2842 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
2843 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
2844 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2846 dev_err(&hdev->pdev->dev,
2847 "Configure rss input fail, status = %d\n", ret);
2851 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2854 struct hclge_vport *vport = hclge_get_vport(handle);
2857 /* Get hash algorithm */
2859 *hfunc = vport->rss_algo;
2861 /* Get the RSS Key required by the user */
2863 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2865 /* Get indirect table */
2867 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2868 indir[i] = vport->rss_indirection_tbl[i];
2873 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2874 const u8 *key, const u8 hfunc)
2876 struct hclge_vport *vport = hclge_get_vport(handle);
2877 struct hclge_dev *hdev = vport->back;
2881 /* Set the RSS Hash Key if specified by the user */
2884 if (hfunc == ETH_RSS_HASH_TOP ||
2885 hfunc == ETH_RSS_HASH_NO_CHANGE)
2886 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2889 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2893 /* Update the shadow RSS key with the user specified key */
2894 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2895 vport->rss_algo = hash_algo;
2898 /* Update the shadow RSS table with user specified qids */
2899 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2900 vport->rss_indirection_tbl[i] = indir[i];
2902 /* Update the hardware */
2903 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
2906 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
2908 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
2910 if (nfc->data & RXH_L4_B_2_3)
2911 hash_sets |= HCLGE_D_PORT_BIT;
2913 hash_sets &= ~HCLGE_D_PORT_BIT;
2915 if (nfc->data & RXH_IP_SRC)
2916 hash_sets |= HCLGE_S_IP_BIT;
2918 hash_sets &= ~HCLGE_S_IP_BIT;
2920 if (nfc->data & RXH_IP_DST)
2921 hash_sets |= HCLGE_D_IP_BIT;
2923 hash_sets &= ~HCLGE_D_IP_BIT;
2925 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
2926 hash_sets |= HCLGE_V_TAG_BIT;
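/* Example mapping (illustrative): for TCP_V4_FLOW with nfc->data =
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, this returns
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT; only SCTP flows additionally get HCLGE_V_TAG_BIT.
 */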
2931 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
2932 struct ethtool_rxnfc *nfc)
2934 struct hclge_vport *vport = hclge_get_vport(handle);
2935 struct hclge_dev *hdev = vport->back;
2936 struct hclge_rss_input_tuple_cmd *req;
2937 struct hclge_desc desc;
2941 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2942 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2945 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2946 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2948 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
2949 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
2950 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
2951 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
2952 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
2953 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
2954 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
2955 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
2957 tuple_sets = hclge_get_rss_hash_bits(nfc);
2958 switch (nfc->flow_type) {
2960 req->ipv4_tcp_en = tuple_sets;
2963 req->ipv6_tcp_en = tuple_sets;
2966 req->ipv4_udp_en = tuple_sets;
2969 req->ipv6_udp_en = tuple_sets;
2972 req->ipv4_sctp_en = tuple_sets;
2975 if ((nfc->data & RXH_L4_B_0_1) ||
2976 (nfc->data & RXH_L4_B_2_3))
2979 req->ipv6_sctp_en = tuple_sets;
2982 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2985 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2991 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2993 dev_err(&hdev->pdev->dev,
2994 "Set rss tuple fail, status = %d\n", ret);
2998 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
2999 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3000 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3001 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3002 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3003 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3004 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3005 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3009 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3010 struct ethtool_rxnfc *nfc)
3012 struct hclge_vport *vport = hclge_get_vport(handle);
3017 switch (nfc->flow_type) {
3019 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3022 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3025 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3028 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3031 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3034 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3038 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3047 if (tuple_sets & HCLGE_D_PORT_BIT)
3048 nfc->data |= RXH_L4_B_2_3;
3049 if (tuple_sets & HCLGE_S_PORT_BIT)
3050 nfc->data |= RXH_L4_B_0_1;
3051 if (tuple_sets & HCLGE_D_IP_BIT)
3052 nfc->data |= RXH_IP_DST;
3053 if (tuple_sets & HCLGE_S_IP_BIT)
3054 nfc->data |= RXH_IP_SRC;
3059 static int hclge_get_tc_size(struct hnae3_handle *handle)
3061 struct hclge_vport *vport = hclge_get_vport(handle);
3062 struct hclge_dev *hdev = vport->back;
3064 return hdev->rss_size_max;
3067 int hclge_rss_init_hw(struct hclge_dev *hdev)
3069 struct hclge_vport *vport = hdev->vport;
3070 u8 *rss_indir = vport[0].rss_indirection_tbl;
3071 u16 rss_size = vport[0].alloc_rss_size;
3072 u8 *key = vport[0].rss_hash_key;
3073 u8 hfunc = vport[0].rss_algo;
3074 u16 tc_offset[HCLGE_MAX_TC_NUM];
3075 u16 tc_valid[HCLGE_MAX_TC_NUM];
3076 u16 tc_size[HCLGE_MAX_TC_NUM];
3080 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3084 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3088 ret = hclge_set_rss_input_tuple(hdev);
3092 /* Each TC has the same queue size, and the tc_size set to hardware is
3093 * the log2 of the roundup power of two of rss_size; the actual queue
3094 * size is limited by the indirection table.
3096 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3097 dev_err(&hdev->pdev->dev,
3098 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3103 roundup_size = roundup_pow_of_two(rss_size);
3104 roundup_size = ilog2(roundup_size);
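/* Worked example (illustrative): rss_size = 24 gives
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so each enabled TC is
 * programmed with tc_size = 5 (a 32-queue span), while the real spread
 * of traffic remains bounded by the indirection table.
 */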
3106 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3109 if (!(hdev->hw_tc_map & BIT(i)))
3113 tc_size[i] = roundup_size;
3114 tc_offset[i] = rss_size * i;
3117 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3120 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3122 struct hclge_vport *vport = hdev->vport;
3125 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3126 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3127 vport[j].rss_indirection_tbl[i] =
3128 i % vport[j].alloc_rss_size;
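/* Illustrative result: with alloc_rss_size = 4 the indirection table
 * becomes the repeating pattern 0, 1, 2, 3, 0, 1, 2, 3, ... across all
 * HCLGE_RSS_IND_TBL_SIZE entries, i.e. a plain round-robin over the
 * RSS queues of that vport.
 */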
3132 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3134 struct hclge_vport *vport = hdev->vport;
3137 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3138 vport[i].rss_tuple_sets.ipv4_tcp_en =
3139 HCLGE_RSS_INPUT_TUPLE_OTHER;
3140 vport[i].rss_tuple_sets.ipv4_udp_en =
3141 HCLGE_RSS_INPUT_TUPLE_OTHER;
3142 vport[i].rss_tuple_sets.ipv4_sctp_en =
3143 HCLGE_RSS_INPUT_TUPLE_SCTP;
3144 vport[i].rss_tuple_sets.ipv4_fragment_en =
3145 HCLGE_RSS_INPUT_TUPLE_OTHER;
3146 vport[i].rss_tuple_sets.ipv6_tcp_en =
3147 HCLGE_RSS_INPUT_TUPLE_OTHER;
3148 vport[i].rss_tuple_sets.ipv6_udp_en =
3149 HCLGE_RSS_INPUT_TUPLE_OTHER;
3150 vport[i].rss_tuple_sets.ipv6_sctp_en =
3151 HCLGE_RSS_INPUT_TUPLE_SCTP;
3152 vport[i].rss_tuple_sets.ipv6_fragment_en =
3153 HCLGE_RSS_INPUT_TUPLE_OTHER;
3155 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3157 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3160 hclge_rss_indir_init_cfg(hdev);
3163 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3164 int vector_id, bool en,
3165 struct hnae3_ring_chain_node *ring_chain)
3167 struct hclge_dev *hdev = vport->back;
3168 struct hnae3_ring_chain_node *node;
3169 struct hclge_desc desc;
3170 struct hclge_ctrl_vector_chain_cmd *req
3171 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3172 enum hclge_cmd_status status;
3173 enum hclge_opcode_type op;
3174 u16 tqp_type_and_id;
3177 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3178 hclge_cmd_setup_basic_desc(&desc, op, false);
3179 req->int_vector_id = vector_id;
3182 for (node = ring_chain; node; node = node->next) {
3183 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3184 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3186 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3187 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3188 HCLGE_TQP_ID_S, node->tqp_index);
3189 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3191 hnae3_get_field(node->int_gl_idx,
3192 HNAE3_RING_GL_IDX_M,
3193 HNAE3_RING_GL_IDX_S));
3194 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3195 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3196 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3197 req->vfid = vport->vport_id;
3199 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3201 dev_err(&hdev->pdev->dev,
3202 "Map TQP fail, status is %d.\n",
3208 hclge_cmd_setup_basic_desc(&desc,
3211 req->int_vector_id = vector_id;
3216 req->int_cause_num = i;
3217 req->vfid = vport->vport_id;
3218 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3220 dev_err(&hdev->pdev->dev,
3221 "Map TQP fail, status is %d.\n", status);
3229 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3231 struct hnae3_ring_chain_node *ring_chain)
3233 struct hclge_vport *vport = hclge_get_vport(handle);
3234 struct hclge_dev *hdev = vport->back;
3237 vector_id = hclge_get_vector_index(hdev, vector);
3238 if (vector_id < 0) {
3239 dev_err(&hdev->pdev->dev,
3240 "Get vector index fail. vector_id =%d\n", vector_id);
3244 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3247 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3249 struct hnae3_ring_chain_node *ring_chain)
3251 struct hclge_vport *vport = hclge_get_vport(handle);
3252 struct hclge_dev *hdev = vport->back;
3255 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3258 vector_id = hclge_get_vector_index(hdev, vector);
3259 if (vector_id < 0) {
3260 dev_err(&handle->pdev->dev,
3261 "Get vector index fail. ret =%d\n", vector_id);
3265 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3267 dev_err(&handle->pdev->dev,
3268 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3275 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3276 struct hclge_promisc_param *param)
3278 struct hclge_promisc_cfg_cmd *req;
3279 struct hclge_desc desc;
3282 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3284 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3285 req->vf_id = param->vf_id;
3287 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3288 * pdev revision(0x20); newer revisions support them. Setting these two
3289 * fields does not return an error when the driver sends the command
3290 * to firmware in revision(0x20).
3292 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3293 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3295 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3297 dev_err(&hdev->pdev->dev,
3298 "Set promisc mode fail, status is %d.\n", ret);
3303 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3304 bool en_mc, bool en_bc, int vport_id)
3309 memset(param, 0, sizeof(struct hclge_promisc_param));
3311 param->enable = HCLGE_PROMISC_EN_UC;
3313 param->enable |= HCLGE_PROMISC_EN_MC;
3315 param->enable |= HCLGE_PROMISC_EN_BC;
3316 param->vf_id = vport_id;
3319 static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3322 struct hclge_vport *vport = hclge_get_vport(handle);
3323 struct hclge_dev *hdev = vport->back;
3324 struct hclge_promisc_param param;
3326 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true,
3328 hclge_cmd_set_promisc_mode(hdev, ¶m);
3331 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3333 struct hclge_get_fd_mode_cmd *req;
3334 struct hclge_desc desc;
3337 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3339 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3343 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3347 *fd_mode = req->mode;
3352 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3353 u32 *stage1_entry_num,
3354 u32 *stage2_entry_num,
3355 u16 *stage1_counter_num,
3356 u16 *stage2_counter_num)
3358 struct hclge_get_fd_allocation_cmd *req;
3359 struct hclge_desc desc;
3362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3364 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3366 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3368 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3373 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3374 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3375 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3376 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3381 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3383 struct hclge_set_fd_key_config_cmd *req;
3384 struct hclge_fd_key_cfg *stage;
3385 struct hclge_desc desc;
3388 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3390 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3391 stage = &hdev->fd_cfg.key_cfg[stage_num];
3392 req->stage = stage_num;
3393 req->key_select = stage->key_sel;
3394 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3395 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3396 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3397 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3398 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3399 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3401 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3403 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3408 static int hclge_init_fd_config(struct hclge_dev *hdev)
3410 #define LOW_2_WORDS 0x03
3411 struct hclge_fd_key_cfg *key_cfg;
3414 if (!hnae3_dev_fd_supported(hdev))
3417 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3421 switch (hdev->fd_cfg.fd_mode) {
3422 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3423 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3425 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3426 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3429 dev_err(&hdev->pdev->dev,
3430 "Unsupported flow director mode %d\n",
3431 hdev->fd_cfg.fd_mode);
3435 hdev->fd_cfg.fd_en = true;
3436 hdev->fd_cfg.proto_support =
3437 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3438 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3439 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3440 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3441 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3442 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3443 key_cfg->outer_sipv6_word_en = 0;
3444 key_cfg->outer_dipv6_word_en = 0;
3446 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3447 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3448 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3449 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3451 /* If the max 400-bit key is used, we can also support ether type tuples */
3452 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3453 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3454 key_cfg->tuple_active |=
3455 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3458 /* roce_type is used to filter RoCE frames,
3459 * dst_vport is used to specify the rule
3461 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3463 ret = hclge_get_fd_allocation(hdev,
3464 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3465 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3466 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3467 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3471 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3474 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3475 int loc, u8 *key, bool is_add)
3477 struct hclge_fd_tcam_config_1_cmd *req1;
3478 struct hclge_fd_tcam_config_2_cmd *req2;
3479 struct hclge_fd_tcam_config_3_cmd *req3;
3480 struct hclge_desc desc[3];
3483 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3484 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3485 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3486 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3487 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3489 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3490 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3491 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3493 req1->stage = stage;
3494 req1->xy_sel = sel_x ? 1 : 0;
3495 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3496 req1->index = cpu_to_le32(loc);
3497 req1->entry_vld = sel_x ? is_add : 0;
3500 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3501 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3502 sizeof(req2->tcam_data));
3503 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3504 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3507 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3509 dev_err(&hdev->pdev->dev,
3510 "config tcam key fail, ret=%d\n",
3516 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3517 struct hclge_fd_ad_data *action)
3519 struct hclge_fd_ad_config_cmd *req;
3520 struct hclge_desc desc;
3524 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3526 req = (struct hclge_fd_ad_config_cmd *)desc.data;
3527 req->index = cpu_to_le32(loc);
3530 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3531 action->write_rule_id_to_bd);
3532 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3535 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3536 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3537 action->forward_to_direct_queue);
3538 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3540 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3541 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3542 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3543 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3544 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3545 action->next_input_key);
3547 req->ad_data = cpu_to_le64(ad_data);
3548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3550 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3555 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3556 struct hclge_fd_rule *rule)
3558 u16 tmp_x_s, tmp_y_s;
3559 u32 tmp_x_l, tmp_y_l;
3562 if (rule->unused_tuple & tuple_bit)
3565 switch (tuple_bit) {
3568 case BIT(INNER_DST_MAC):
3569 for (i = 0; i < 6; i++) {
3570 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3571 rule->tuples_mask.dst_mac[i]);
3572 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3573 rule->tuples_mask.dst_mac[i]);
3577 case BIT(INNER_SRC_MAC):
3578 for (i = 0; i < 6; i++) {
3579 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3580 rule->tuples_mask.src_mac[i]);
3581 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3582 rule->tuples_mask.src_mac[i]);
3586 case BIT(INNER_VLAN_TAG_FST):
3587 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3588 rule->tuples_mask.vlan_tag1);
3589 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3590 rule->tuples_mask.vlan_tag1);
3591 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3592 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3595 case BIT(INNER_ETH_TYPE):
3596 calc_x(tmp_x_s, rule->tuples.ether_proto,
3597 rule->tuples_mask.ether_proto);
3598 calc_y(tmp_y_s, rule->tuples.ether_proto,
3599 rule->tuples_mask.ether_proto);
3600 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3601 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3604 case BIT(INNER_IP_TOS):
3605 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3606 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3609 case BIT(INNER_IP_PROTO):
3610 calc_x(*key_x, rule->tuples.ip_proto,
3611 rule->tuples_mask.ip_proto);
3612 calc_y(*key_y, rule->tuples.ip_proto,
3613 rule->tuples_mask.ip_proto);
3616 case BIT(INNER_SRC_IP):
3617 calc_x(tmp_x_l, rule->tuples.src_ip[3],
3618 rule->tuples_mask.src_ip[3]);
3619 calc_y(tmp_y_l, rule->tuples.src_ip[3],
3620 rule->tuples_mask.src_ip[3]);
3621 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3622 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3625 case BIT(INNER_DST_IP):
3626 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
3627 rule->tuples_mask.dst_ip[3]);
3628 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
3629 rule->tuples_mask.dst_ip[3]);
3630 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3631 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3634 case BIT(INNER_SRC_PORT):
3635 calc_x(tmp_x_s, rule->tuples.src_port,
3636 rule->tuples_mask.src_port);
3637 calc_y(tmp_y_s, rule->tuples.src_port,
3638 rule->tuples_mask.src_port);
3639 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3640 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3643 case BIT(INNER_DST_PORT):
3644 calc_x(tmp_x_s, rule->tuples.dst_port,
3645 rule->tuples_mask.dst_port);
3646 calc_y(tmp_y_s, rule->tuples.dst_port,
3647 rule->tuples_mask.dst_port);
3648 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3649 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3657 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
3658 u8 vf_id, u8 network_port_id)
3660 u32 port_number = 0;
3662 if (port_type == HOST_PORT) {
3663 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
3665 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
3667 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
3669 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
3670 HCLGE_NETWORK_PORT_ID_S, network_port_id);
3671 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
3677 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
3678 __le32 *key_x, __le32 *key_y,
3679 struct hclge_fd_rule *rule)
3681 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
3682 u8 cur_pos = 0, tuple_size, shift_bits;
3685 for (i = 0; i < MAX_META_DATA; i++) {
3686 tuple_size = meta_data_key_info[i].key_length;
3687 tuple_bit = key_cfg->meta_data_active & BIT(i);
3689 switch (tuple_bit) {
3690 case BIT(ROCE_TYPE):
3691 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
3692 cur_pos += tuple_size;
3694 case BIT(DST_VPORT):
3695 port_number = hclge_get_port_number(HOST_PORT, 0,
3697 hnae3_set_field(meta_data,
3698 GENMASK(cur_pos + tuple_size - 1, cur_pos),
3699 cur_pos, port_number);
3700 cur_pos += tuple_size;
3707 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
3708 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
3709 shift_bits = sizeof(meta_data) * 8 - cur_pos;
3711 *key_x = cpu_to_le32(tmp_x << shift_bits);
3712 *key_y = cpu_to_le32(tmp_y << shift_bits);
3715 /* A complete key is the combination of the meta data key and the tuple
3716 * key. The meta data key is stored in the MSB region and the tuple key
3717 * in the LSB region; unused bits are filled with 0.
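/* Illustrative layout, assuming MAX_KEY_LENGTH = 400 bits and
 * MAX_META_DATA_LENGTH = 32 bits:
 *
 *   bit 399          bit 368 | bit 367                          bit 0
 *   [    meta data key     ] | [ tuple key, zero padded upward      ]
 *
 * hclge_config_key() below accordingly places the meta data
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8 bytes into the key
 * buffer, i.e. exactly at this MSB region.
 */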
3719 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
3720 struct hclge_fd_rule *rule)
3722 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
3723 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
3724 u8 *cur_key_x, *cur_key_y;
3725 int i, ret, tuple_size;
3726 u8 meta_data_region;
3728 memset(key_x, 0, sizeof(key_x));
3729 memset(key_y, 0, sizeof(key_y));
3733 for (i = 0 ; i < MAX_TUPLE; i++) {
3737 tuple_size = tuple_key_info[i].key_length / 8;
3738 check_tuple = key_cfg->tuple_active & BIT(i);
3740 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
3743 cur_key_x += tuple_size;
3744 cur_key_y += tuple_size;
3748 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
3749 MAX_META_DATA_LENGTH / 8;
3751 hclge_fd_convert_meta_data(key_cfg,
3752 (__le32 *)(key_x + meta_data_region),
3753 (__le32 *)(key_y + meta_data_region),
3756 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
3759 dev_err(&hdev->pdev->dev,
3760 "fd key_y config fail, loc=%d, ret=%d\n",
3761 rule->location, ret);
3765 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
3768 dev_err(&hdev->pdev->dev,
3769 "fd key_x config fail, loc=%d, ret=%d\n",
3770 rule->location, ret);
3774 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
3775 struct hclge_fd_rule *rule)
3777 struct hclge_fd_ad_data ad_data;
3779 ad_data.ad_id = rule->location;
3781 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
3782 ad_data.drop_packet = true;
3783 ad_data.forward_to_direct_queue = false;
3784 ad_data.queue_id = 0;
3786 ad_data.drop_packet = false;
3787 ad_data.forward_to_direct_queue = true;
3788 ad_data.queue_id = rule->queue_id;
3791 ad_data.use_counter = false;
3792 ad_data.counter_id = 0;
3794 ad_data.use_next_stage = false;
3795 ad_data.next_input_key = 0;
3797 ad_data.write_rule_id_to_bd = true;
3798 ad_data.rule_id = rule->location;
3800 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
3803 static int hclge_fd_check_spec(struct hclge_dev *hdev,
3804 struct ethtool_rx_flow_spec *fs, u32 *unused)
3806 struct ethtool_tcpip4_spec *tcp_ip4_spec;
3807 struct ethtool_usrip4_spec *usr_ip4_spec;
3808 struct ethtool_tcpip6_spec *tcp_ip6_spec;
3809 struct ethtool_usrip6_spec *usr_ip6_spec;
3810 struct ethhdr *ether_spec;
3812 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
3815 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
3818 if ((fs->flow_type & FLOW_EXT) &&
3819 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
3820 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
3824 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
3828 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
3829 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
3831 if (!tcp_ip4_spec->ip4src)
3832 *unused |= BIT(INNER_SRC_IP);
3834 if (!tcp_ip4_spec->ip4dst)
3835 *unused |= BIT(INNER_DST_IP);
3837 if (!tcp_ip4_spec->psrc)
3838 *unused |= BIT(INNER_SRC_PORT);
3840 if (!tcp_ip4_spec->pdst)
3841 *unused |= BIT(INNER_DST_PORT);
3843 if (!tcp_ip4_spec->tos)
3844 *unused |= BIT(INNER_IP_TOS);
3848 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
3849 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
3850 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3852 if (!usr_ip4_spec->ip4src)
3853 *unused |= BIT(INNER_SRC_IP);
3855 if (!usr_ip4_spec->ip4dst)
3856 *unused |= BIT(INNER_DST_IP);
3858 if (!usr_ip4_spec->tos)
3859 *unused |= BIT(INNER_IP_TOS);
3861 if (!usr_ip4_spec->proto)
3862 *unused |= BIT(INNER_IP_PROTO);
3864 if (usr_ip4_spec->l4_4_bytes)
3867 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
3874 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
3875 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
3878 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
3879 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
3880 *unused |= BIT(INNER_SRC_IP);
3882 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
3883 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
3884 *unused |= BIT(INNER_DST_IP);
3886 if (!tcp_ip6_spec->psrc)
3887 *unused |= BIT(INNER_SRC_PORT);
3889 if (!tcp_ip6_spec->pdst)
3890 *unused |= BIT(INNER_DST_PORT);
3892 if (tcp_ip6_spec->tclass)
3896 case IPV6_USER_FLOW:
3897 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
3898 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
3899 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
3900 BIT(INNER_DST_PORT);
3902 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
3903 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
3904 *unused |= BIT(INNER_SRC_IP);
3906 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
3907 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
3908 *unused |= BIT(INNER_DST_IP);
3910 if (!usr_ip6_spec->l4_proto)
3911 *unused |= BIT(INNER_IP_PROTO);
3913 if (usr_ip6_spec->tclass)
3916 if (usr_ip6_spec->l4_4_bytes)
3921 ether_spec = &fs->h_u.ether_spec;
3922 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3923 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
3924 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
3926 if (is_zero_ether_addr(ether_spec->h_source))
3927 *unused |= BIT(INNER_SRC_MAC);
3929 if (is_zero_ether_addr(ether_spec->h_dest))
3930 *unused |= BIT(INNER_DST_MAC);
3932 if (!ether_spec->h_proto)
3933 *unused |= BIT(INNER_ETH_TYPE);
3940 if ((fs->flow_type & FLOW_EXT)) {
3941 if (fs->h_ext.vlan_etype)
3943 if (!fs->h_ext.vlan_tci)
3944 *unused |= BIT(INNER_VLAN_TAG_FST);
3946 if (fs->m_ext.vlan_tci) {
3947 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
3951 *unused |= BIT(INNER_VLAN_TAG_FST);
3954 if (fs->flow_type & FLOW_MAC_EXT) {
3955 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
3958 if (is_zero_ether_addr(fs->h_ext.h_dest))
3959 *unused |= BIT(INNER_DST_MAC);
3961 *unused &= ~(BIT(INNER_DST_MAC));
3967 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
3969 struct hclge_fd_rule *rule = NULL;
3970 struct hlist_node *node2;
3972 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
3973 if (rule->location >= location)
3977 return rule && rule->location == location;
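/* Note: fd_rule_list is kept sorted by ascending location (see
 * hclge_fd_update_rule_list() below, which inserts new nodes behind the
 * last node with a smaller location), so the scan above may stop at the
 * first node whose location is >= the one being looked up.
 */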
3980 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
3981 struct hclge_fd_rule *new_rule,
3985 struct hclge_fd_rule *rule = NULL, *parent = NULL;
3986 struct hlist_node *node2;
3988 if (is_add && !new_rule)
3991 hlist_for_each_entry_safe(rule, node2,
3992 &hdev->fd_rule_list, rule_node) {
3993 if (rule->location >= location)
3998 if (rule && rule->location == location) {
3999 hlist_del(&rule->rule_node);
4001 hdev->hclge_fd_rule_num--;
4006 } else if (!is_add) {
4007 dev_err(&hdev->pdev->dev,
4008 "delete fail, rule %d is inexistent\n",
4013 INIT_HLIST_NODE(&new_rule->rule_node);
4016 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4018 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4020 hdev->hclge_fd_rule_num++;
4025 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4026 struct ethtool_rx_flow_spec *fs,
4027 struct hclge_fd_rule *rule)
4029 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4031 switch (flow_type) {
4035 rule->tuples.src_ip[3] =
4036 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4037 rule->tuples_mask.src_ip[3] =
4038 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4040 rule->tuples.dst_ip[3] =
4041 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4042 rule->tuples_mask.dst_ip[3] =
4043 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4045 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4046 rule->tuples_mask.src_port =
4047 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4049 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4050 rule->tuples_mask.dst_port =
4051 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4053 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4054 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4056 rule->tuples.ether_proto = ETH_P_IP;
4057 rule->tuples_mask.ether_proto = 0xFFFF;
4061 rule->tuples.src_ip[3] =
4062 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4063 rule->tuples_mask.src_ip[3] =
4064 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4066 rule->tuples.dst_ip[3] =
4067 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4068 rule->tuples_mask.dst_ip[3] =
4069 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4071 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4072 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4074 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4075 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4077 rule->tuples.ether_proto = ETH_P_IP;
4078 rule->tuples_mask.ether_proto = 0xFFFF;
4084 be32_to_cpu_array(rule->tuples.src_ip,
4085 fs->h_u.tcp_ip6_spec.ip6src, 4);
4086 be32_to_cpu_array(rule->tuples_mask.src_ip,
4087 fs->m_u.tcp_ip6_spec.ip6src, 4);
4089 be32_to_cpu_array(rule->tuples.dst_ip,
4090 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4091 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4092 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4094 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4095 rule->tuples_mask.src_port =
4096 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4098 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4099 rule->tuples_mask.dst_port =
4100 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4102 rule->tuples.ether_proto = ETH_P_IPV6;
4103 rule->tuples_mask.ether_proto = 0xFFFF;
4106 case IPV6_USER_FLOW:
4107 be32_to_cpu_array(rule->tuples.src_ip,
4108 fs->h_u.usr_ip6_spec.ip6src, 4);
4109 be32_to_cpu_array(rule->tuples_mask.src_ip,
4110 fs->m_u.usr_ip6_spec.ip6src, 4);
4112 be32_to_cpu_array(rule->tuples.dst_ip,
4113 fs->h_u.usr_ip6_spec.ip6dst, 4);
4114 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4115 fs->m_u.usr_ip6_spec.ip6dst, 4);
4117 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4118 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4120 rule->tuples.ether_proto = ETH_P_IPV6;
4121 rule->tuples_mask.ether_proto = 0xFFFF;
4125 ether_addr_copy(rule->tuples.src_mac,
4126 fs->h_u.ether_spec.h_source);
4127 ether_addr_copy(rule->tuples_mask.src_mac,
4128 fs->m_u.ether_spec.h_source);
4130 ether_addr_copy(rule->tuples.dst_mac,
4131 fs->h_u.ether_spec.h_dest);
4132 ether_addr_copy(rule->tuples_mask.dst_mac,
4133 fs->m_u.ether_spec.h_dest);
4135 rule->tuples.ether_proto =
4136 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4137 rule->tuples_mask.ether_proto =
4138 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4145 switch (flow_type) {
4148 rule->tuples.ip_proto = IPPROTO_SCTP;
4149 rule->tuples_mask.ip_proto = 0xFF;
4153 rule->tuples.ip_proto = IPPROTO_TCP;
4154 rule->tuples_mask.ip_proto = 0xFF;
4158 rule->tuples.ip_proto = IPPROTO_UDP;
4159 rule->tuples_mask.ip_proto = 0xFF;
4165 if ((fs->flow_type & FLOW_EXT)) {
4166 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4167 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4170 if (fs->flow_type & FLOW_MAC_EXT) {
4171 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4172 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4178 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4179 struct ethtool_rxnfc *cmd)
4181 struct hclge_vport *vport = hclge_get_vport(handle);
4182 struct hclge_dev *hdev = vport->back;
4183 u16 dst_vport_id = 0, q_index = 0;
4184 struct ethtool_rx_flow_spec *fs;
4185 struct hclge_fd_rule *rule;
4190 if (!hnae3_dev_fd_supported(hdev))
4193 if (!hdev->fd_cfg.fd_en) {
4194 dev_warn(&hdev->pdev->dev,
4195 "Please enable flow director first\n");
4199 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4201 ret = hclge_fd_check_spec(hdev, fs, &unused);
4203 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4207 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4208 action = HCLGE_FD_ACTION_DROP_PACKET;
4210 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4211 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4214 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4215 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4218 dev_err(&hdev->pdev->dev,
4219 "Error: queue id (%d) > max tqp num (%d)\n",
4224 if (vf > hdev->num_req_vfs) {
4225 dev_err(&hdev->pdev->dev,
4226 "Error: vf id (%d) > max vf num (%d)\n",
4227 vf, hdev->num_req_vfs);
4231 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4235 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4239 ret = hclge_fd_get_tuple(hdev, fs, rule);
4243 rule->flow_type = fs->flow_type;
4245 rule->location = fs->location;
4246 rule->unused_tuple = unused;
4247 rule->vf_id = dst_vport_id;
4248 rule->queue_id = q_index;
4249 rule->action = action;
4251 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4255 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4259 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4270 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4271 struct ethtool_rxnfc *cmd)
4273 struct hclge_vport *vport = hclge_get_vport(handle);
4274 struct hclge_dev *hdev = vport->back;
4275 struct ethtool_rx_flow_spec *fs;
4278 if (!hnae3_dev_fd_supported(hdev))
4281 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4283 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4286 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4287 dev_err(&hdev->pdev->dev,
4288 "Delete fail, rule %d is inexistent\n",
4293 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4294 fs->location, NULL, false);
4298 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4302 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
4304 struct hclge_desc desc;
4305 struct hclge_config_mac_mode_cmd *req =
4306 (struct hclge_config_mac_mode_cmd *)desc.data;
4310 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
4311 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
4312 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
4313 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
4314 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
4315 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
4316 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
4317 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
4318 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
4319 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
4320 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
4321 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
4322 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
4323 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
4324 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
4325 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
4327 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4329 dev_err(&hdev->pdev->dev,
4330 "mac enable fail, ret =%d.\n", ret);
4333 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
4335 struct hclge_config_mac_mode_cmd *req;
4336 struct hclge_desc desc;
4340 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
4341 /* 1. Read out the MAC mode config first */
4342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
4343 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4345 dev_err(&hdev->pdev->dev,
4346 "mac loopback get fail, ret =%d.\n", ret);
4350 /* 2. Then set up the loopback flag */
4351 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
4352 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
4353 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
4354 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
4356 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
4358 /* 3. Config mac work mode with the loopback flag
4359 * and its original configuration parameters
4361 hclge_cmd_reuse_desc(&desc, false);
4362 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4364 dev_err(&hdev->pdev->dev,
4365 "mac loopback set fail, ret =%d.\n", ret);
4369 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
4370 enum hnae3_loop loop_mode)
4372 #define HCLGE_SERDES_RETRY_MS 10
4373 #define HCLGE_SERDES_RETRY_NUM 100
4374 struct hclge_serdes_lb_cmd *req;
4375 struct hclge_desc desc;
4379 req = (struct hclge_serdes_lb_cmd *)desc.data;
4380 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
4382 switch (loop_mode) {
4383 case HNAE3_LOOP_SERIAL_SERDES:
4384 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
4386 case HNAE3_LOOP_PARALLEL_SERDES:
4387 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
4390 dev_err(&hdev->pdev->dev,
4391 "unsupported serdes loopback mode %d\n", loop_mode);
4396 req->enable = loop_mode_b;
4397 req->mask = loop_mode_b;
4399 req->mask = loop_mode_b;
4402 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4404 dev_err(&hdev->pdev->dev,
4405 "serdes loopback set fail, ret = %d\n", ret);
4410 msleep(HCLGE_SERDES_RETRY_MS);
4411 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
4413 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4415 dev_err(&hdev->pdev->dev,
4416 "serdes loopback get, ret = %d\n", ret);
4419 } while (++i < HCLGE_SERDES_RETRY_NUM &&
4420 !(req->result & HCLGE_CMD_SERDES_DONE_B));
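/* Polling budget (illustrative): HCLGE_SERDES_RETRY_NUM retries of
 * HCLGE_SERDES_RETRY_MS each bound the wait at 100 * 10 ms = 1 s
 * before the DONE bit is declared timed out below.
 */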
4422 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
4423 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
4425 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
4426 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
4430 hclge_cfg_mac_mode(hdev, en);
4434 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
4435 int stream_id, bool enable)
4437 struct hclge_desc desc;
4438 struct hclge_cfg_com_tqp_queue_cmd *req =
4439 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
4442 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
4443 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
4444 req->stream_id = cpu_to_le16(stream_id);
4445 req->enable |= enable << HCLGE_TQP_ENABLE_B;
4447 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4449 dev_err(&hdev->pdev->dev,
4450 "Tqp enable fail, status =%d.\n", ret);
4454 static int hclge_set_loopback(struct hnae3_handle *handle,
4455 enum hnae3_loop loop_mode, bool en)
4457 struct hclge_vport *vport = hclge_get_vport(handle);
4458 struct hclge_dev *hdev = vport->back;
4461 switch (loop_mode) {
4462 case HNAE3_LOOP_APP:
4463 ret = hclge_set_app_loopback(hdev, en);
4465 case HNAE3_LOOP_SERIAL_SERDES:
4466 case HNAE3_LOOP_PARALLEL_SERDES:
4467 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
4471 dev_err(&hdev->pdev->dev,
4472 "loop_mode %d is not supported\n", loop_mode);
4476 for (i = 0; i < vport->alloc_tqps; i++) {
4477 ret = hclge_tqp_enable(hdev, i, 0, en);
4485 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
4487 struct hclge_vport *vport = hclge_get_vport(handle);
4488 struct hnae3_queue *queue;
4489 struct hclge_tqp *tqp;
4492 for (i = 0; i < vport->alloc_tqps; i++) {
4493 queue = handle->kinfo.tqp[i];
4494 tqp = container_of(queue, struct hclge_tqp, q);
4495 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
4499 static int hclge_ae_start(struct hnae3_handle *handle)
4501 struct hclge_vport *vport = hclge_get_vport(handle);
4502 struct hclge_dev *hdev = vport->back;
4505 for (i = 0; i < vport->alloc_tqps; i++)
4506 hclge_tqp_enable(hdev, i, 0, true);
4509 hclge_cfg_mac_mode(hdev, true);
4510 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
4511 mod_timer(&hdev->service_timer, jiffies + HZ);
4512 hdev->hw.mac.link = 0;
4514 /* reset tqp stats */
4515 hclge_reset_tqp_stats(handle);
4517 hclge_mac_start_phy(hdev);
static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	hclge_update_link_status(hdev);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num, bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
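/* Illustrative examples (not part of the driver): vfid 40 lands in
 * desc[1].data[1], bit 8 (40 / 32 == 1, 40 % 32 == 8); vfid 200 lands in
 * desc[2].data[0], bit 8 ((200 - 192) / 32 == 0, 200 % 32 == 8).
 */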
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
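/* Illustrative example (not part of the driver): for 00:11:22:33:44:55 the
 * packing above yields
 *	high_val = 0x33221100	(bytes 3..0 in little-endian order)
 *	low_val  = 0x5544	(bytes 5..4)
 */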
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
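/* Illustrative example (not part of the driver), assuming
 * hdev->mta_mac_sel_type == 0 so rsh == 4: the IPv4 multicast MAC
 * 01:00:5e:xx:xx:xx gives high_val = (0x01 << 8) | 0x00 = 0x0100 and
 * index = (0x0100 >> 4) & 0xfff = 0x10.
 */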
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		      enable);
	hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
			HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		      enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}
static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
			HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	if (enable)
		set_bit(idx, vport->mta_shadow);
	else
		clear_bit(idx, vport->mta_shadow);

	return 0;
}

static int hclge_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct net_device *netdev = handle->kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	memset(mta_status, 0, sizeof(mta_status));

	/* update mta_status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclge_update_mta_status_common(vport, mta_status,
					      0, HCLGE_MTA_TBL_SIZE, true);
}
int hclge_update_mta_status_common(struct hclge_vport *vport,
				   unsigned long *status,
				   u16 idx, u16 count,
				   bool update_filter)
{
	struct hclge_dev *hdev = vport->back;
	u16 update_max = idx + count;
	u16 check_max;
	int ret = 0;
	bool used;
	u16 i;

	/* setup mta check range */
	if (update_filter) {
		i = 0;
		check_max = HCLGE_MTA_TBL_SIZE;
	} else {
		i = idx;
		check_max = update_max;
	}

	used = false;
	/* check and update all mta items */
	for (; i < check_max; i++) {
		/* ignore unused item */
		if (!test_bit(i, vport->mta_shadow))
			continue;

		/* if i is in the update range then update it */
		if (i >= idx && i < update_max)
			if (!test_bit(i - idx, status))
				hclge_set_mta_table_item(vport, i, false);

		if (!used && test_bit(i, vport->mta_shadow))
			used = true;
	}

	/* no longer using mta, disable it */
	if (vport->accept_mta_mc && update_filter && !used) {
		ret = hclge_cfg_func_mta_filter(hdev,
						vport->vport_id,
						false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"disable func mta filter fail ret=%d\n",
				ret);
			return ret;
		}

		vport->accept_mta_mc = false;
	}

	return ret;
}
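/* Illustrative usage (this is exactly what hclge_update_mta_status() above
 * does): rebuild the whole table from a freshly computed bitmap and drop
 * the function's MTA acceptance if nothing remains set:
 *
 *	hclge_update_mta_status_common(vport, mta_status,
 *				       0, HCLGE_MTA_TBL_SIZE, true);
 */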
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr,
			is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	/* Look the mac address up in the mac_vlan table, and add it if the
	 * entry is absent. Duplicate unicast entries are not allowed in
	 * the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT)
		return hclge_add_mac_vlan_tbl(vport, &req, NULL);

	/* check if we just hit the duplicate */
	if (!ret)
		ret = -EINVAL;

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update the VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* If the mc mac vlan table is full, use the MTA table */
	if (status == -ENOSPC) {
		if (!vport->accept_mta_mc) {
			status = hclge_cfg_func_mta_filter(hdev,
							   vport->vport_id,
							   true);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"set mta filter mode fail ret=%d\n",
					status);
				return status;
			}
			vport->accept_mta_mc = true;
		}

		/* Set MTA table for this MAC address */
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
		status = hclge_set_mta_table_item(vport, tbl_idx, true);
	}

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so we need to delete
			 * this entry.
			 */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all of the vfids are zero, update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an mta entry represents an address
		 * range rather than a specific address. The delete action for
		 * all entries takes effect in update_mta_status, called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
		return -EINVAL;
	}

	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
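	/* Illustrative example (not part of the driver): vfid 130 gives
	 * vf_byte_off = 16 and vf_byte_val = 1 << 2, so the bit spills into
	 * the second descriptor as req1->vf_bitmap[0] = 0x04.
	 */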
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
			dev_warn(&hdev->pdev->dev,
				 "vlan %d filter is not in vf vlan table\n",
				 vlan);
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
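/* Illustrative example (not part of the driver): vlan_id 1000 selects
 * 160-VLAN window 6 (1000 / 160), byte 5 within the window
 * ((1000 % 160) / 8) and bit value 1 << (1000 % 8) == 0x01, i.e.
 * req->vlan_offset = 6 and req->vlan_offset_bitmap[5] = 0x01.
 */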
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when the 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
					0, is_kill);
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle;
	struct hclge_vport *vport;
	int ret;
	int i;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
	if (ret)
		return ret;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.accept_untag1 = true;

		/* accept_tag2 and accept_untag2 are not supported on
		 * pdev revision(0x20); newer revisions support them.
		 * Setting these two fields does not return an error
		 * when the driver sends the command to firmware on
		 * revision(0x20), and they cannot be configured by
		 * the user.
		 */
		vport->txvlan_cfg.accept_tag2 = true;
		vport->txvlan_cfg.accept_untag2 = true;

		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.insert_tag2_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
		vport->txvlan_cfg.default_tag2 = 0;

		ret = hclge_set_vlan_tx_offload_cfg(vport);
		if (ret)
			return ret;

		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
		vport->rxvlan_cfg.vlan2_vlan_prionly = false;

		ret = hclge_set_vlan_rx_offload_cfg(vport);
		if (ret)
			return ret;
	}

	handle = &hdev->vport[0].nic;
	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;
	int max_frm_size;
	int ret;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(max_frm_size);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	hdev->mps = max_frm_size;

	return 0;
}
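/* Illustrative arithmetic (not part of the driver): for new_mtu == 1500,
 * max_frm_size = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4)
 *              = 1522 bytes, i.e. a standard VLAN-tagged Ethernet frame.
 */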
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_set_mac_mtu(hdev, new_mtu);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

	return ret;
}

static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
					  u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_instance_hw(struct hclge_dev *hdev)
{
	return hclge_mac_connect_phy(hdev);
}

static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
{
	hclge_mac_disconnect_phy(hdev);
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			ret = hclge_init_instance_hw(hdev);
			if (ret) {
				client->ops->uninit_instance(&vport->nic,
							     0);
				goto clear_nic;
			}

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			hclge_uninit_instance_hw(hdev);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	/* Free old tqps, and reallocate with the new tqp number when the nic
	 * is set up.
	 */
	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitialize the RSS indirection table according to the new
	 * RSS size.
	 */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;
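	/* Illustrative example (not part of the driver): with
	 * kinfo->rss_size == 4 the indirection table becomes the repeating
	 * pattern 0, 1, 2, 3, 0, 1, 2, 3, ... so hashed flows are spread
	 * evenly over the first four queues.
	 */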
	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
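/* Packing note (readable directly from the loop above): the first
 * descriptor contributes HCLGE_32_BIT_REG_RTN_DATANUM - 2 register words
 * and every later descriptor contributes HCLGE_32_BIT_REG_RTN_DATANUM
 * words, which is why cmd_num rounds regs_num + 2 up to a whole number of
 * descriptors.
 */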
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
				    data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.update_mta_status = hclge_update_mta_status,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);