1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (c) 2014-2015 Hisilicon Limited.
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_vlan.h>
10 #include <linux/interrupt.h>
13 #include <linux/ipv6.h>
14 #include <linux/module.h>
15 #include <linux/phy.h>
16 #include <linux/platform_device.h>
17 #include <linux/skbuff.h>
21 #include "hns_dsaf_mac.h"
23 #define NIC_MAX_Q_PER_VF 16
24 #define HNS_NIC_TX_TIMEOUT (5 * HZ)
26 #define SERVICE_TIMER_HZ (1 * HZ)
28 #define RCB_IRQ_NOT_INITED 0
29 #define RCB_IRQ_INITED 1
30 #define HNS_BUFFER_SIZE_2048 2048
32 #define BD_MAX_SEND_SIZE 8191
33 #define SKB_TMP_LEN(SKB) \
34 (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
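/* SKB_TMP_LEN() is the number of header bytes in front of the TCP payload
 * (L2 + L3 headers plus the TCP header), so "skb->len - SKB_TMP_LEN(skb)"
 * below yields the TSO payload length programmed into the descriptor.
 */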
36 static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
37 int send_sz, dma_addr_t dma, int frag_end,
38 int buf_num, enum hns_desc_type type, int mtu)
40 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
41 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
43 struct ipv6hdr *ipv6hdr;
55 desc_cb->length = size;
59 desc->addr = cpu_to_le64(dma);
60 desc->tx.send_size = cpu_to_le16((u16)send_sz);
62 /* config bd buffer end */
63 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
64 hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
66 /* fill port_id in the tx bd for sending management pkts */
67 hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
68 HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
70 if (type == DESC_TYPE_SKB) {
71 skb = (struct sk_buff *)priv;
73 if (skb->ip_summed == CHECKSUM_PARTIAL) {
74 skb_reset_mac_len(skb);
75 protocol = skb->protocol;
78 if (protocol == htons(ETH_P_8021Q)) {
79 ip_offset += VLAN_HLEN;
80 protocol = vlan_get_protocol(skb);
81 skb->protocol = protocol;
84 if (skb->protocol == htons(ETH_P_IP)) {
86 hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
87 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
89 /* check for tcp/udp header */
90 if (iphdr->protocol == IPPROTO_TCP &&
94 l4_len = tcp_hdrlen(skb);
95 mss = skb_shinfo(skb)->gso_size;
96 paylen = skb->len - SKB_TMP_LEN(skb);
98 } else if (skb->protocol == htons(ETH_P_IPV6)) {
99 hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
100 ipv6hdr = ipv6_hdr(skb);
101 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
103 /* check for tcp/udp header */
104 if (ipv6hdr->nexthdr == IPPROTO_TCP &&
105 skb_is_gso(skb) && skb_is_gso_v6(skb)) {
108 l4_len = tcp_hdrlen(skb);
109 mss = skb_shinfo(skb)->gso_size;
110 paylen = skb->len - SKB_TMP_LEN(skb);
113 desc->tx.ip_offset = ip_offset;
114 desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
115 desc->tx.mss = cpu_to_le16(mss);
116 desc->tx.l4_len = l4_len;
117 desc->tx.paylen = cpu_to_le16(paylen);
121 hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
123 desc->tx.bn_pid = bn_pid;
124 desc->tx.ra_ri_cs_fe_vld = rrcfv;
126 ring_ptr_move_fw(ring, next_to_use);
129 static void fill_v2_desc(struct hnae_ring *ring, void *priv,
130 int size, dma_addr_t dma, int frag_end,
131 int buf_num, enum hns_desc_type type, int mtu)
133 fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
137 static const struct acpi_device_id hns_enet_acpi_match[] = {
142 MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
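/* fill_desc() below programs the v1 tx descriptor layout; v2 hardware uses
 * fill_v2_desc()/fill_tso_desc() instead. The active callback is selected
 * per chip version in hns_nic_set_priv_ops().
 */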
144 static void fill_desc(struct hnae_ring *ring, void *priv,
145 int size, dma_addr_t dma, int frag_end,
146 int buf_num, enum hns_desc_type type, int mtu)
148 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
149 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
153 u32 asid_bufnum_pid = 0;
154 u32 flag_ipoffset = 0;
156 desc_cb->priv = priv;
157 desc_cb->length = size;
159 desc_cb->type = type;
161 desc->addr = cpu_to_le64(dma);
162 desc->tx.send_size = cpu_to_le16((u16)size);
164 /* config bd buffer end */
165 flag_ipoffset |= 1 << HNS_TXD_VLD_B;
167 asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
169 if (type == DESC_TYPE_SKB) {
170 skb = (struct sk_buff *)priv;
172 if (skb->ip_summed == CHECKSUM_PARTIAL) {
173 protocol = skb->protocol;
174 ip_offset = ETH_HLEN;
176 /* if it is a SW VLAN, check the next protocol */
177 if (protocol == htons(ETH_P_8021Q)) {
178 ip_offset += VLAN_HLEN;
179 protocol = vlan_get_protocol(skb);
180 skb->protocol = protocol;
183 if (skb->protocol == htons(ETH_P_IP)) {
184 flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
185 /* check for tcp/udp header */
186 flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
188 } else if (skb->protocol == htons(ETH_P_IPV6)) {
189 /* IPv6 has no L3 checksum, so only check the L4 header */
190 flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
193 flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
197 flag_ipoffset |= frag_end << HNS_TXD_FE_B;
199 desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
200 desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
202 ring_ptr_move_fw(ring, next_to_use);
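/* unfill_desc() backs next_to_use off by one BD; the xmit error path uses
 * it to unwind descriptors that were already filled when a later DMA
 * mapping fails.
 */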
205 static void unfill_desc(struct hnae_ring *ring)
207 ring_ptr_move_bw(ring, next_to_use);
210 static int hns_nic_maybe_stop_tx(
211 struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
213 struct sk_buff *skb = *out_skb;
214 struct sk_buff *new_skb = NULL;
217 /* no. of segments (plus a header) */
218 buf_num = skb_shinfo(skb)->nr_frags + 1;
220 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
221 if (ring_space(ring) < 1)
224 new_skb = skb_copy(skb, GFP_ATOMIC);
228 dev_kfree_skb_any(skb);
231 } else if (buf_num > ring_space(ring)) {
239 static int hns_nic_maybe_stop_tso(
240 struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
246 struct sk_buff *skb = *out_skb;
247 struct sk_buff *new_skb = NULL;
250 size = skb_headlen(skb);
251 buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
253 frag_num = skb_shinfo(skb)->nr_frags;
254 for (i = 0; i < frag_num; i++) {
255 frag = &skb_shinfo(skb)->frags[i];
256 size = skb_frag_size(frag);
257 buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
260 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
261 buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
262 if (ring_space(ring) < buf_num)
264 /* manually split the packet to be sent */
265 new_skb = skb_copy(skb, GFP_ATOMIC);
268 dev_kfree_skb_any(skb);
271 } else if (ring_space(ring) < buf_num) {
279 static void fill_tso_desc(struct hnae_ring *ring, void *priv,
280 int size, dma_addr_t dma, int frag_end,
281 int buf_num, enum hns_desc_type type, int mtu)
287 frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
288 sizeoflast = size % BD_MAX_SEND_SIZE;
289 sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
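/* Example of the split below: with BD_MAX_SEND_SIZE == 8191, a 20000-byte
 * fragment becomes frag_buf_num = 3 BDs carrying 8191, 8191 and 3618 bytes;
 * only the first BD records the full 'size' and the skb (DESC_TYPE_SKB),
 * the remaining BDs are filled as DESC_TYPE_PAGE.
 */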
291 /* when the frag size is bigger than the hardware limit, split this frag */
292 for (k = 0; k < frag_buf_num; k++)
293 fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
294 (k == frag_buf_num - 1) ?
295 sizeoflast : BD_MAX_SEND_SIZE,
296 dma + BD_MAX_SEND_SIZE * k,
297 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
299 (type == DESC_TYPE_SKB && !k) ?
300 DESC_TYPE_SKB : DESC_TYPE_PAGE,
304 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
306 struct hns_nic_ring_data *ring_data)
308 struct hns_nic_priv *priv = netdev_priv(ndev);
309 struct hnae_ring *ring = ring_data->ring;
310 struct device *dev = ring_to_dev(ring);
311 struct netdev_queue *dev_queue;
316 int size, next_to_use;
319 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
321 ring->stats.tx_busy++;
322 goto out_net_tx_busy;
324 ring->stats.sw_err_cnt++;
325 netdev_err(ndev, "no memory to xmit!\n");
331 /* no. of segments (plus a header) */
332 seg_num = skb_shinfo(skb)->nr_frags + 1;
333 next_to_use = ring->next_to_use;
335 /* fill the first part */
336 size = skb_headlen(skb);
337 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
338 if (dma_mapping_error(dev, dma)) {
339 netdev_err(ndev, "TX head DMA map failed\n");
340 ring->stats.sw_err_cnt++;
343 priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
344 buf_num, DESC_TYPE_SKB, ndev->mtu);
346 /* fill the fragments */
347 for (i = 1; i < seg_num; i++) {
348 frag = &skb_shinfo(skb)->frags[i - 1];
349 size = skb_frag_size(frag);
350 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
351 if (dma_mapping_error(dev, dma)) {
352 netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
353 ring->stats.sw_err_cnt++;
354 goto out_map_frag_fail;
356 priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
357 seg_num - 1 == i ? 1 : 0, buf_num,
358 DESC_TYPE_PAGE, ndev->mtu);
361 /* all fragments of the packet have been translated into BDs */
362 dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
363 netdev_tx_sent_queue(dev_queue, skb->len);
365 netif_trans_update(ndev);
366 ndev->stats.tx_bytes += skb->len;
367 ndev->stats.tx_packets++;
369 wmb(); /* commit all data before submit */
370 assert(skb->queue_mapping < priv->ae_handle->q_num);
371 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
377 while (ring->next_to_use != next_to_use) {
379 if (ring->next_to_use != next_to_use)
381 ring->desc_cb[ring->next_to_use].dma,
382 ring->desc_cb[ring->next_to_use].length,
385 dma_unmap_single(dev,
386 ring->desc_cb[next_to_use].dma,
387 ring->desc_cb[next_to_use].length,
393 dev_kfree_skb_any(skb);
398 netif_stop_subqueue(ndev, skb->queue_mapping);
400 /* Herbert's original patch had:
401 * smp_mb__after_netif_stop_queue();
402 * but since that doesn't exist yet, just open code it.
405 return NETDEV_TX_BUSY;
408 static void hns_nic_reuse_page(struct sk_buff *skb, int i,
409 struct hnae_ring *ring, int pull_len,
410 struct hnae_desc_cb *desc_cb)
412 struct hnae_desc *desc;
418 twobufs = ((PAGE_SIZE < 8192) &&
419 hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
421 desc = &ring->desc[ring->next_to_clean];
422 size = le16_to_cpu(desc->rx.size);
425 truesize = hnae_buf_size(ring);
427 truesize = ALIGN(size, L1_CACHE_BYTES);
428 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
431 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
432 size - pull_len, truesize);
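/* Buffer reuse scheme: with sub-8K pages and 2048-byte rx buffers, two
 * buffers share one page, so flipping page_offset with XOR below simply
 * alternates between the two halves; for other buffer sizes the offset is
 * walked forward until the page is exhausted (last_offset).
 */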
434 /* avoid re-using pages from a remote NUMA node; the flag defaults to no reuse */
435 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
439 /* if we are the only owner of the page we can reuse it */
440 if (likely(page_count(desc_cb->priv) == 1)) {
441 /* flip page offset to other buffer */
442 desc_cb->page_offset ^= truesize;
444 desc_cb->reuse_flag = 1;
445 /* bump the page ref count before it is given to the stack */
446 get_page(desc_cb->priv);
451 /* move the offset past the buffer just consumed (truesize is cache-line aligned) */
452 desc_cb->page_offset += truesize;
454 if (desc_cb->page_offset <= last_offset) {
455 desc_cb->reuse_flag = 1;
456 /* bump the page ref count before it is given to the stack */
457 get_page(desc_cb->priv);
461 static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
463 *out_bnum = hnae_get_field(bnum_flag,
464 HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
467 static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
469 *out_bnum = hnae_get_field(bnum_flag,
470 HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
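/* v1 descriptors carry the BD count directly, while v2 descriptors store
 * "count - 1", hence the "+ 1" in get_v2rx_desc_bnum() above.
 */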
473 static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
474 struct sk_buff *skb, u32 flag)
476 struct net_device *netdev = ring_data->napi.dev;
480 /* check if RX checksum offload is enabled */
481 if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
484 /* In hardware, we only support checksum for the following protocols:
486 * 2) TCP(over IPv4 or IPv6),
487 * 3) UDP(over IPv4 or IPv6),
488 * 4) SCTP(over IPv4 or IPv6)
489 * but the traffic we receive can carry many other L3 (IPv4, IPv6, MPLS, PPPoE, etc.)
490 * and L4 (TCP, UDP, GRE, SCTP, IGMP, ICMP, etc.) protocols.
492 * Hardware limitation:
493 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
494 * Error" bit (which usually can be used to indicate whether checksum
495 * was calculated by the hardware and if there was any error encountered
496 * during checksum calculation).
498 * Software workaround:
499 * We do get info within the RX descriptor about the kind of L3/L4
500 * protocol coming in the packet and the error status. These errors
501 * might not just be checksum errors but could be related to version,
502 * length of IPv4, UDP, TCP etc.
503 * Because there is no way of knowing if it is an L3/L4 error due to a bad
504 * checksum or any other L3/L4 error, we will not (cannot) convey
505 * checksum status for such cases to upper stack and will not maintain
506 * the RX L3/L4 checksum counters as well.
509 l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
510 l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
512 /* check L3 protocol for which checksum is supported */
513 if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
516 /* check for any (not just checksum) flagged L3 protocol errors */
517 if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
520 /* we do not support checksum of fragmented packets */
521 if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
524 /* check L4 protocol for which checksum is supported */
525 if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
526 (l4id != HNS_RX_FLAG_L4ID_UDP) &&
527 (l4id != HNS_RX_FLAG_L4ID_SCTP))
530 /* check for any (not just checksum) flagged L4 protocol errors */
531 if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
534 /* now, this has to be a packet with valid RX checksum */
535 skb->ip_summed = CHECKSUM_UNNECESSARY;
538 static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
539 struct sk_buff **out_skb, int *out_bnum)
541 struct hnae_ring *ring = ring_data->ring;
542 struct net_device *ndev = ring_data->napi.dev;
543 struct hns_nic_priv *priv = netdev_priv(ndev);
545 struct hnae_desc *desc;
546 struct hnae_desc_cb *desc_cb;
552 desc = &ring->desc[ring->next_to_clean];
553 desc_cb = &ring->desc_cb[ring->next_to_clean];
557 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
559 /* prefetch first cache line of first page */
561 #if L1_CACHE_BYTES < 128
562 prefetch(va + L1_CACHE_BYTES);
565 skb = *out_skb = napi_alloc_skb(&ring_data->napi,
567 if (unlikely(!skb)) {
568 netdev_err(ndev, "alloc rx skb fail\n");
569 ring->stats.sw_err_cnt++;
573 prefetchw(skb->data);
574 length = le16_to_cpu(desc->rx.pkt_len);
575 bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
576 priv->ops.get_rxd_bnum(bnum_flag, &bnum);
579 if (length <= HNS_RX_HEAD_SIZE) {
580 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
582 /* we can reuse buffer as-is, just make sure it is local */
583 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
584 desc_cb->reuse_flag = 1;
585 else /* this page cannot be reused so discard it */
586 put_page(desc_cb->priv);
588 ring_ptr_move_fw(ring, next_to_clean);
590 if (unlikely(bnum != 1)) { /* check for errors */
595 ring->stats.seg_pkt_cnt++;
597 pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
598 memcpy(__skb_put(skb, pull_len), va,
599 ALIGN(pull_len, sizeof(long)));
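/* Only the protocol headers (up to eth_get_headlen()) are copied into the
 * skb linear area; the rest of the first buffer and any following buffers
 * are attached as page fragments by hns_nic_reuse_page().
 */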
601 hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
602 ring_ptr_move_fw(ring, next_to_clean);
604 if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check for errors */
608 for (i = 1; i < bnum; i++) {
609 desc = &ring->desc[ring->next_to_clean];
610 desc_cb = &ring->desc_cb[ring->next_to_clean];
612 hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
613 ring_ptr_move_fw(ring, next_to_clean);
617 /* exception handling: free the skb and skip over its descriptors */
618 if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
620 *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
621 netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
622 bnum, ring->max_desc_num_per_pkt,
623 length, (int)MAX_SKB_FRAGS,
624 ((u64 *)desc)[0], ((u64 *)desc)[1]);
625 ring->stats.err_bd_num++;
626 dev_kfree_skb_any(skb);
630 bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
632 if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
633 netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
634 ((u64 *)desc)[0], ((u64 *)desc)[1]);
635 ring->stats.non_vld_descs++;
636 dev_kfree_skb_any(skb);
640 if (unlikely((!desc->rx.pkt_len) ||
641 hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
642 ring->stats.err_pkt_len++;
643 dev_kfree_skb_any(skb);
647 if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
648 ring->stats.l2_err++;
649 dev_kfree_skb_any(skb);
653 ring->stats.rx_pkts++;
654 ring->stats.rx_bytes += skb->len;
656 /* indicate to upper stack if our hardware has already calculated
659 hns_nic_rx_checksum(ring_data, skb, bnum_flag);
665 hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
668 struct hnae_desc_cb res_cbs;
669 struct hnae_desc_cb *desc_cb;
670 struct hnae_ring *ring = ring_data->ring;
671 struct net_device *ndev = ring_data->napi.dev;
673 for (i = 0; i < cleand_count; i++) {
674 desc_cb = &ring->desc_cb[ring->next_to_use];
675 if (desc_cb->reuse_flag) {
676 ring->stats.reuse_pg_cnt++;
677 hnae_reuse_buffer(ring, ring->next_to_use);
679 ret = hnae_reserve_buffer_map(ring, &res_cbs);
681 ring->stats.sw_err_cnt++;
682 netdev_err(ndev, "hnae reserve buffer map failed.\n");
685 hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
688 ring_ptr_move_fw(ring, next_to_use);
691 wmb(); /* make sure all data has been written before submit */
692 writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
695 /* return error number for error or number of desc left to take
697 static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
700 struct net_device *ndev = ring_data->napi.dev;
702 skb->protocol = eth_type_trans(skb, ndev);
703 (void)napi_gro_receive(&ring_data->napi, skb);
706 static int hns_desc_unused(struct hnae_ring *ring)
708 int ntc = ring->next_to_clean;
709 int ntu = ring->next_to_use;
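/* number of rx descriptors that have been cleaned (passed by next_to_clean)
 * but not yet refilled at next_to_use, i.e. (ntc - ntu) modulo the ring size
 */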
711 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
714 #define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */
715 #define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */
717 #define HNS_COAL_BDNUM 3
719 static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
721 bool coal_enable = ring->q->handle->coal_adapt_en;
724 ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
725 return HNS_COAL_BDNUM;
730 static void hns_update_rx_rate(struct hnae_ring *ring)
732 bool coal_enable = ring->q->handle->coal_adapt_en;
737 time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
740 /* ring->stats.rx_bytes overflowed */
741 if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
742 ring->coal_last_rx_bytes = ring->stats.rx_bytes;
743 ring->coal_last_jiffies = jiffies;
747 total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
748 time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
749 do_div(total_bytes, time_passed_ms);
750 ring->coal_rx_rate = total_bytes >> 10;
752 ring->coal_last_rx_bytes = ring->stats.rx_bytes;
753 ring->coal_last_jiffies = jiffies;
757 * smooth_alg - smoothing algorithm for adjusting the coalesce parameter
759 static u32 smooth_alg(u32 new_param, u32 old_param)
761 u32 gap = (new_param > old_param) ? new_param - old_param
762 : old_param - new_param;
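/* Step old_param toward new_param instead of jumping straight to the new
 * value (the elided lines presumably scale 'gap' down); this damps
 * oscillation when the rx rate hovers around a threshold.
 */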
767 if (new_param > old_param)
768 return old_param + gap;
770 return old_param - gap;
774 * hns_nic_adpt_coalesce - self-adapt the coalesce parameter according to the rx rate
775 * @ring_data: pointer to hns_nic_ring_data
777 static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
779 struct hnae_ring *ring = ring_data->ring;
780 struct hnae_handle *handle = ring->q->handle;
781 u32 new_coal_param, old_coal_param = ring->coal_param;
783 if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
784 new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
785 else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
786 new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
788 new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;
790 if (new_coal_param == old_coal_param &&
791 new_coal_param == handle->coal_param)
794 new_coal_param = smooth_alg(new_coal_param, old_coal_param);
795 ring->coal_param = new_coal_param;
798 * Because all rings in one port share one coalesce param, a ring that
799 * calculates its own coalesce param cannot always write it to the
800 * hardware at once. There are three conditions as follows:
801 * 1. the current ring's coalesce param is larger than the hardware's.
802 * 2. or the ring which adapted last time can change again.
805 if (new_coal_param == handle->coal_param) {
806 handle->coal_last_jiffies = jiffies;
807 handle->coal_ring_idx = ring_data->queue_index;
808 } else if (new_coal_param > handle->coal_param ||
809 handle->coal_ring_idx == ring_data->queue_index ||
810 time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
811 handle->dev->ops->set_coalesce_usecs(handle,
813 handle->dev->ops->set_coalesce_frames(handle,
815 handle->coal_param = new_coal_param;
816 handle->coal_ring_idx = ring_data->queue_index;
817 handle->coal_last_jiffies = jiffies;
821 static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
824 struct hnae_ring *ring = ring_data->ring;
827 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
828 int recv_pkts, recv_bds, clean_count, err;
829 int unused_count = hns_desc_unused(ring);
831 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
832 rmb(); /* make sure 'num' is read before any other data is touched */
834 recv_pkts = 0, recv_bds = 0, clean_count = 0;
837 while (recv_pkts < budget && recv_bds < num) {
838 /* reuse or realloc buffers */
839 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
840 hns_nic_alloc_rx_buffers(ring_data,
841 clean_count + unused_count);
843 unused_count = hns_desc_unused(ring);
847 err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
848 if (unlikely(!skb)) /* this fault cannot be repaired */
853 if (unlikely(err)) { /* skip over the bad packet */
858 /* hand the packet up to the IP stack via the callback passed in 'v' */
859 ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
865 /* make sure all data has been written before submit */
866 if (clean_count + unused_count > 0)
867 hns_nic_alloc_rx_buffers(ring_data,
868 clean_count + unused_count);
873 static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
875 struct hnae_ring *ring = ring_data->ring;
879 hns_update_rx_rate(ring);
881 /* workaround for a hardware bug */
882 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
883 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
885 if (num <= hns_coal_rx_bdnum(ring)) {
886 if (ring->q->handle->coal_adapt_en)
887 hns_nic_adpt_coalesce(ring_data);
891 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
900 static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
902 struct hnae_ring *ring = ring_data->ring;
905 hns_update_rx_rate(ring);
906 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
908 if (num <= hns_coal_rx_bdnum(ring)) {
909 if (ring->q->handle->coal_adapt_en)
910 hns_nic_adpt_coalesce(ring_data);
918 static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
919 int *bytes, int *pkts)
921 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
923 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
924 (*bytes) += desc_cb->length;
925 /* desc_cb will be cleaned after hnae_free_buffer_detach() */
926 hnae_free_buffer_detach(ring, ring->next_to_clean);
928 ring_ptr_move_fw(ring, next_to_clean);
931 static int is_valid_clean_head(struct hnae_ring *ring, int h)
933 int u = ring->next_to_use;
934 int c = ring->next_to_clean;
936 if (unlikely(h > ring->desc_num))
939 assert(u > 0 && u < ring->desc_num);
940 assert(c > 0 && c < ring->desc_num);
941 assert(u != c && h != c); /* must be checked before calling this function */
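/* 'head' (the hardware read position) is valid only if it lies in the
 * occupied region of the ring, i.e. in (c, u] taking wrap-around into
 * account.
 */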
943 return u > c ? (h > c && h <= u) : (h > c || h <= u);
946 /* netif_tx_lock degrades performance; take it only when necessary */
947 #ifdef CONFIG_NET_POLL_CONTROLLER
948 #define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
949 #define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
951 #define NETIF_TX_LOCK(ring)
952 #define NETIF_TX_UNLOCK(ring)
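/* With CONFIG_NET_POLL_CONTROLLER, tx reclaim may presumably also be entered
 * from the netpoll path, so a per-ring spinlock guards it; otherwise the
 * macros compile away to keep the fast path cheap.
 */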
955 /* reclaim all descriptors within one budget
956 * return an error or the number of descriptors left
958 static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
961 struct hnae_ring *ring = ring_data->ring;
962 struct net_device *ndev = ring_data->napi.dev;
963 struct netdev_queue *dev_queue;
964 struct hns_nic_priv *priv = netdev_priv(ndev);
970 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
971 rmb(); /* make sure head is ready before touching any data */
973 if (is_ring_empty(ring) || head == ring->next_to_clean) {
974 NETIF_TX_UNLOCK(ring);
975 return 0; /* no data to poll */
978 if (!is_valid_clean_head(ring, head)) {
979 netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
980 ring->next_to_use, ring->next_to_clean);
981 ring->stats.io_err_cnt++;
982 NETIF_TX_UNLOCK(ring);
988 while (head != ring->next_to_clean) {
989 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
990 /* issue prefetch for next Tx descriptor */
991 prefetch(&ring->desc_cb[ring->next_to_clean]);
993 /* update tx ring statistics. */
994 ring->stats.tx_pkts += pkts;
995 ring->stats.tx_bytes += bytes;
997 NETIF_TX_UNLOCK(ring);
999 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
1000 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1002 if (unlikely(priv->link && !netif_carrier_ok(ndev)))
1003 netif_carrier_on(ndev);
1005 if (unlikely(pkts && netif_carrier_ok(ndev) &&
1006 (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
1007 /* Make sure that anybody stopping the queue after this
1008 * sees the new next_to_clean.
1011 if (netif_tx_queue_stopped(dev_queue) &&
1012 !test_bit(NIC_STATE_DOWN, &priv->state)) {
1013 netif_tx_wake_queue(dev_queue);
1014 ring->stats.restart_queue++;
1020 static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
1022 struct hnae_ring *ring = ring_data->ring;
1025 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
1027 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
1029 if (head != ring->next_to_clean) {
1030 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
1031 ring_data->ring, 1);
1039 static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
1041 struct hnae_ring *ring = ring_data->ring;
1042 int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
1044 if (head == ring->next_to_clean)
1050 static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
1052 struct hnae_ring *ring = ring_data->ring;
1053 struct net_device *ndev = ring_data->napi.dev;
1054 struct netdev_queue *dev_queue;
1058 NETIF_TX_LOCK(ring);
1060 head = ring->next_to_use; /* ntu: ring position set by software */
1063 while (head != ring->next_to_clean)
1064 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
1066 NETIF_TX_UNLOCK(ring);
1068 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
1069 netdev_tx_reset_queue(dev_queue);
1072 static int hns_nic_common_poll(struct napi_struct *napi, int budget)
1074 int clean_complete = 0;
1075 struct hns_nic_ring_data *ring_data =
1076 container_of(napi, struct hns_nic_ring_data, napi);
1077 struct hnae_ring *ring = ring_data->ring;
1080 clean_complete += ring_data->poll_one(
1081 ring_data, budget - clean_complete,
1082 ring_data->ex_process);
1084 if (clean_complete < budget) {
1085 if (ring_data->fini_process(ring_data)) {
1086 napi_complete(napi);
1087 ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
1093 return clean_complete;
1096 static irqreturn_t hns_irq_handle(int irq, void *dev)
1098 struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
1100 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
1101 ring_data->ring, 1);
1102 napi_schedule(&ring_data->napi);
1108 *hns_nic_adjust_link - adjust the link mode according to the PHY state or new parameters
1111 static void hns_nic_adjust_link(struct net_device *ndev)
1113 struct hns_nic_priv *priv = netdev_priv(ndev);
1114 struct hnae_handle *h = priv->ae_handle;
1117 /* If there is no phy, there is no need to adjust the link */
1119 /* When the phy link is down, do nothing */
1120 if (ndev->phydev->link == 0)
1123 if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
1124 ndev->phydev->duplex)) {
1125 /* because the Hi161X chip doesn't support changing gmac
1126 * speed and duplex while carrying traffic. Delay 200ms to
1127 * make sure there is no more data in the chip FIFO.
1129 netif_carrier_off(ndev);
1131 h->dev->ops->adjust_link(h, ndev->phydev->speed,
1132 ndev->phydev->duplex);
1133 netif_carrier_on(ndev);
1137 state = state && h->dev->ops->get_status(h);
1139 if (state != priv->link) {
1141 netif_carrier_on(ndev);
1142 netif_tx_wake_all_queues(ndev);
1143 netdev_info(ndev, "link up\n");
1145 netif_carrier_off(ndev);
1146 netdev_info(ndev, "link down\n");
1153 *hns_nic_init_phy - init phy
1156 * Return 0 on success, negative on failure
1158 int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1160 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
1161 struct phy_device *phy_dev = h->phy_dev;
1167 ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
1168 linkmode_and(phy_dev->supported, phy_dev->supported, supported);
1169 linkmode_copy(phy_dev->advertising, phy_dev->supported);
1171 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1172 phy_dev->autoneg = false;
1174 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
1175 phy_dev->dev_flags = 0;
1177 ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
1180 ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
1185 phy_attached_info(phy_dev);
1190 static int hns_nic_ring_open(struct net_device *netdev, int idx)
1192 struct hns_nic_priv *priv = netdev_priv(netdev);
1193 struct hnae_handle *h = priv->ae_handle;
1195 napi_enable(&priv->ring_data[idx].napi);
1197 enable_irq(priv->ring_data[idx].ring->irq);
1198 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
1203 static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
1205 struct hns_nic_priv *priv = netdev_priv(ndev);
1206 struct hnae_handle *h = priv->ae_handle;
1207 struct sockaddr *mac_addr = p;
1210 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1211 return -EADDRNOTAVAIL;
1213 ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
1215 netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
1219 memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
1224 static void hns_nic_update_stats(struct net_device *netdev)
1226 struct hns_nic_priv *priv = netdev_priv(netdev);
1227 struct hnae_handle *h = priv->ae_handle;
1229 h->dev->ops->update_stats(h, &netdev->stats);
1232 /* set the mac address if it is configured, or leave it to the AE driver */
1233 static void hns_init_mac_addr(struct net_device *ndev)
1235 struct hns_nic_priv *priv = netdev_priv(ndev);
1237 if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
1238 eth_hw_addr_random(ndev);
1239 dev_warn(priv->dev, "No valid mac, use random mac %pM",
1244 static void hns_nic_ring_close(struct net_device *netdev, int idx)
1246 struct hns_nic_priv *priv = netdev_priv(netdev);
1247 struct hnae_handle *h = priv->ae_handle;
1249 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
1250 disable_irq(priv->ring_data[idx].ring->irq);
1252 napi_disable(&priv->ring_data[idx].napi);
1255 static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
1256 struct hnae_ring *ring, cpumask_t *mask)
1260 /* Different irq balancing between 16-core and 32-core systems.
1261 * The cpu mask is set by ring index according to the ring flag
1262 * which indicates whether the ring is tx or rx.
1264 if (q_num == num_possible_cpus()) {
1265 if (is_tx_ring(ring))
1268 cpu = ring_idx - q_num;
1270 if (is_tx_ring(ring))
1273 cpu = (ring_idx - q_num) * 2 + 1;
1276 cpumask_clear(mask);
1277 cpumask_set_cpu(cpu, mask);
1282 static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
1286 for (i = 0; i < q_num * 2; i++) {
1287 if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
1288 irq_set_affinity_hint(priv->ring_data[i].ring->irq,
1290 free_irq(priv->ring_data[i].ring->irq,
1291 &priv->ring_data[i]);
1292 priv->ring_data[i].ring->irq_init_flag =
1298 static int hns_nic_init_irq(struct hns_nic_priv *priv)
1300 struct hnae_handle *h = priv->ae_handle;
1301 struct hns_nic_ring_data *rd;
1306 for (i = 0; i < h->q_num * 2; i++) {
1307 rd = &priv->ring_data[i];
1309 if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
1312 snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
1313 "%s-%s%d", priv->netdev->name,
1314 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
1316 rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
1318 ret = request_irq(rd->ring->irq,
1319 hns_irq_handle, 0, rd->ring->ring_name, rd);
1321 netdev_err(priv->netdev, "request irq(%d) fail\n",
1325 disable_irq(rd->ring->irq);
1327 cpu = hns_nic_init_affinity_mask(h->q_num, i,
1328 rd->ring, &rd->mask);
1330 if (cpu_online(cpu))
1331 irq_set_affinity_hint(rd->ring->irq,
1334 rd->ring->irq_init_flag = RCB_IRQ_INITED;
1340 hns_nic_free_irq(h->q_num, priv);
1344 static int hns_nic_net_up(struct net_device *ndev)
1346 struct hns_nic_priv *priv = netdev_priv(ndev);
1347 struct hnae_handle *h = priv->ae_handle;
1351 if (!test_bit(NIC_STATE_DOWN, &priv->state))
1354 ret = hns_nic_init_irq(priv);
1356 netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
1360 for (i = 0; i < h->q_num * 2; i++) {
1361 ret = hns_nic_ring_open(ndev, i);
1363 goto out_has_some_queues;
1366 ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
1368 goto out_set_mac_addr_err;
1370 ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
1375 phy_start(ndev->phydev);
1377 clear_bit(NIC_STATE_DOWN, &priv->state);
1378 (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
1383 netif_stop_queue(ndev);
1384 out_set_mac_addr_err:
1385 out_has_some_queues:
1386 for (j = i - 1; j >= 0; j--)
1387 hns_nic_ring_close(ndev, j);
1389 hns_nic_free_irq(h->q_num, priv);
1390 set_bit(NIC_STATE_DOWN, &priv->state);
1395 static void hns_nic_net_down(struct net_device *ndev)
1398 struct hnae_ae_ops *ops;
1399 struct hns_nic_priv *priv = netdev_priv(ndev);
1401 if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
1404 (void)del_timer_sync(&priv->service_timer);
1405 netif_tx_stop_all_queues(ndev);
1406 netif_carrier_off(ndev);
1407 netif_tx_disable(ndev);
1411 phy_stop(ndev->phydev);
1413 ops = priv->ae_handle->dev->ops;
1416 ops->stop(priv->ae_handle);
1418 netif_tx_stop_all_queues(ndev);
1420 for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
1421 hns_nic_ring_close(ndev, i);
1422 hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
1424 /* clean tx buffers */
1425 hns_nic_tx_clr_all_bufs(priv->ring_data + i);
1429 void hns_nic_net_reset(struct net_device *ndev)
1431 struct hns_nic_priv *priv = netdev_priv(ndev);
1432 struct hnae_handle *handle = priv->ae_handle;
1434 while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
1435 usleep_range(1000, 2000);
1437 (void)hnae_reinit_handle(handle);
1439 clear_bit(NIC_STATE_RESETTING, &priv->state);
1442 void hns_nic_net_reinit(struct net_device *netdev)
1444 struct hns_nic_priv *priv = netdev_priv(netdev);
1445 enum hnae_port_type type = priv->ae_handle->port_type;
1447 netif_trans_update(priv->netdev);
1448 while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
1449 usleep_range(1000, 2000);
1451 hns_nic_net_down(netdev);
1453 /* Only do hns_nic_net_reset in debug mode
1454 * because of hardware limitation.
1456 if (type == HNAE_PORT_DEBUG)
1457 hns_nic_net_reset(netdev);
1459 (void)hns_nic_net_up(netdev);
1460 clear_bit(NIC_STATE_REINITING, &priv->state);
1463 static int hns_nic_net_open(struct net_device *ndev)
1465 struct hns_nic_priv *priv = netdev_priv(ndev);
1466 struct hnae_handle *h = priv->ae_handle;
1469 if (test_bit(NIC_STATE_TESTING, &priv->state))
1473 netif_carrier_off(ndev);
1475 ret = netif_set_real_num_tx_queues(ndev, h->q_num);
1477 netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
1482 ret = netif_set_real_num_rx_queues(ndev, h->q_num);
1485 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
1489 ret = hns_nic_net_up(ndev);
1492 "hns net up fail, ret=%d!\n", ret);
1499 static int hns_nic_net_stop(struct net_device *ndev)
1501 hns_nic_net_down(ndev);
1506 static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
1507 #define HNS_TX_TIMEO_LIMIT (40 * HZ)
1508 static void hns_nic_net_timeout(struct net_device *ndev)
1510 struct hns_nic_priv *priv = netdev_priv(ndev);
1512 if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
1513 ndev->watchdog_timeo *= 2;
1514 netdev_info(ndev, "watchdog_timo changed to %d.\n",
1515 ndev->watchdog_timeo);
1517 ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
1518 hns_tx_timeout_reset(priv);
1522 static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
1525 struct phy_device *phy_dev = netdev->phydev;
1527 if (!netif_running(netdev))
1533 return phy_mii_ioctl(phy_dev, ifr, cmd);
1536 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
1537 struct net_device *ndev)
1539 struct hns_nic_priv *priv = netdev_priv(ndev);
1541 assert(skb->queue_mapping < priv->ae_handle->q_num);
1543 return hns_nic_net_xmit_hw(ndev, skb,
1544 &tx_ring_data(priv, skb->queue_mapping));
1547 static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
1548 struct sk_buff *skb)
1550 dev_kfree_skb_any(skb);
1553 #define HNS_LB_TX_RING 0
1554 static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
1556 struct sk_buff *skb;
1557 struct ethhdr *ethhdr;
1560 /* allocate test skb */
1561 skb = alloc_skb(64, GFP_KERNEL);
1567 memset(skb->data, 0xFF, skb->len);
1569 /* must be a tcp/ip packet */
1570 ethhdr = (struct ethhdr *)skb->data;
1571 ethhdr->h_proto = htons(ETH_P_IP);
1573 frame_len = skb->len & (~1ul);
1574 memset(&skb->data[frame_len / 2], 0xAA,
1577 skb->queue_mapping = HNS_LB_TX_RING;
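/* The assembled frame is transmitted on tx ring 0 while the serdes loopback
 * is enabled, so each rx ring can receive it and re-fetch fresh descriptors
 * (see hns_nic_clear_all_rx_fetch() below).
 */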
1582 static int hns_enable_serdes_lb(struct net_device *ndev)
1584 struct hns_nic_priv *priv = netdev_priv(ndev);
1585 struct hnae_handle *h = priv->ae_handle;
1586 struct hnae_ae_ops *ops = h->dev->ops;
1590 ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
1594 ret = ops->start ? ops->start(h) : 0;
1598 /* adjust link speed and duplex */
1599 if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1605 ops->adjust_link(h, speed, duplex);
1607 /* wait h/w ready */
1613 static void hns_disable_serdes_lb(struct net_device *ndev)
1615 struct hns_nic_priv *priv = netdev_priv(ndev);
1616 struct hnae_handle *h = priv->ae_handle;
1617 struct hnae_ae_ops *ops = h->dev->ops;
1620 ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
1624 *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has fetched. The
1625 *function works as follows:
1626 * 1. if one rx ring finds that page_offset is not equal to 0 between head
1627 * and tail, it means that the chip fetched the wrong descs for the ring
1628 * whose buffer size is 4096.
1629 * 2. we set the chip serdes loopback and set the rss indirection to that ring.
1630 * 3. construct 64-byte ip broadcast packets, wait for the associated rx ring
1631 * to receive all the packets, and it will fetch new descriptors.
1632 * 4. recover to the original state.
1636 static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
1638 struct hns_nic_priv *priv = netdev_priv(ndev);
1639 struct hnae_handle *h = priv->ae_handle;
1640 struct hnae_ae_ops *ops = h->dev->ops;
1641 struct hns_nic_ring_data *rd;
1642 struct hnae_ring *ring;
1643 struct sk_buff *skb;
1654 /* alloc indir memory */
1655 indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
1656 org_indir = kzalloc(indir_size, GFP_KERNEL);
1660 /* store the original indirection */
1661 ops->get_rss(h, org_indir, NULL, NULL);
1663 cur_indir = kzalloc(indir_size, GFP_KERNEL);
1666 goto cur_indir_alloc_err;
1670 if (hns_enable_serdes_lb(ndev)) {
1672 goto enable_serdes_lb_err;
1675 /* for each rx ring, clear the fetched descs */
1676 for (i = 0; i < h->q_num; i++) {
1677 ring = &h->qs[i]->rx_ring;
1678 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
1679 tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
1681 fetch_num = ring_dist(ring, head, tail);
1683 while (head != tail) {
1684 if (ring->desc_cb[head].page_offset != 0) {
1690 if (head == ring->desc_num)
1695 for (j = 0; j < indir_size / sizeof(*org_indir); j++)
1697 ops->set_rss(h, cur_indir, NULL, 0);
1699 for (j = 0; j < fetch_num; j++) {
1700 /* alloc one skb and init */
1701 skb = hns_assemble_skb(ndev);
1704 rd = &tx_ring_data(priv, skb->queue_mapping);
1705 hns_nic_net_xmit_hw(ndev, skb, rd);
1708 while (retry_times++ < 10) {
1711 rd = &rx_ring_data(priv, i);
1712 if (rd->poll_one(rd, fetch_num,
1713 hns_nic_drop_rx_fetch))
1718 while (retry_times++ < 10) {
1720 /* clean the packets sent on tx ring 0 */
1721 rd = &tx_ring_data(priv,
1723 if (rd->poll_one(rd, fetch_num, NULL))
1731 /* restore everything */
1732 ops->set_rss(h, org_indir, NULL, 0);
1733 hns_disable_serdes_lb(ndev);
1734 enable_serdes_lb_err:
1736 cur_indir_alloc_err:
1742 static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
1744 struct hns_nic_priv *priv = netdev_priv(ndev);
1745 struct hnae_handle *h = priv->ae_handle;
1746 bool if_running = netif_running(ndev);
1749 /* MTU < 68 is an error and causes problems on some kernels */
1754 if (new_mtu == ndev->mtu)
1757 if (!h->dev->ops->set_mtu)
1761 (void)hns_nic_net_stop(ndev);
1765 if (priv->enet_ver != AE_VERSION_1 &&
1766 ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
1767 new_mtu > BD_SIZE_2048_MAX_MTU) {
1769 hnae_reinit_all_ring_desc(h);
1771 /* clear the descriptors which the chip has already fetched */
1772 ret = hns_nic_clear_all_rx_fetch(ndev);
1774 /* the page offset must be consistent with the desc */
1775 hnae_reinit_all_ring_page_off(h);
1778 netdev_err(ndev, "clear the fetched desc fail\n");
1783 ret = h->dev->ops->set_mtu(h, new_mtu);
1785 netdev_err(ndev, "set mtu fail, return value %d\n",
1790 /* finally, set new mtu to netdevice */
1791 ndev->mtu = new_mtu;
1795 if (hns_nic_net_open(ndev)) {
1796 netdev_err(ndev, "hns net open fail\n");
1804 static int hns_nic_set_features(struct net_device *netdev,
1805 netdev_features_t features)
1807 struct hns_nic_priv *priv = netdev_priv(netdev);
1809 switch (priv->enet_ver) {
1811 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1812 netdev_info(netdev, "enet v1 does not support tso!\n");
1815 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1816 priv->ops.fill_desc = fill_tso_desc;
1817 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
1818 /* The chip only supports 7*4096 */
1819 netif_set_gso_max_size(netdev, 7 * 4096);
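/* Capping gso_max_size at 28 KB keeps a single TSO skb within the number of
 * BDs the hardware can chain for one packet (an assumption based on the
 * "7*4096" limit noted above).
 */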
1821 priv->ops.fill_desc = fill_v2_desc;
1822 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1826 netdev->features = features;
1830 static netdev_features_t hns_nic_fix_features(
1831 struct net_device *netdev, netdev_features_t features)
1833 struct hns_nic_priv *priv = netdev_priv(netdev);
1835 switch (priv->enet_ver) {
1837 features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
1838 NETIF_F_HW_VLAN_CTAG_FILTER);
1846 static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
1848 struct hns_nic_priv *priv = netdev_priv(netdev);
1849 struct hnae_handle *h = priv->ae_handle;
1851 if (h->dev->ops->add_uc_addr)
1852 return h->dev->ops->add_uc_addr(h, addr);
1857 static int hns_nic_uc_unsync(struct net_device *netdev,
1858 const unsigned char *addr)
1860 struct hns_nic_priv *priv = netdev_priv(netdev);
1861 struct hnae_handle *h = priv->ae_handle;
1863 if (h->dev->ops->rm_uc_addr)
1864 return h->dev->ops->rm_uc_addr(h, addr);
1870 * hns_set_multicast_list - set the multicast mac addresses
1871 * @netdev: net device
1876 static void hns_set_multicast_list(struct net_device *ndev)
1878 struct hns_nic_priv *priv = netdev_priv(ndev);
1879 struct hnae_handle *h = priv->ae_handle;
1880 struct netdev_hw_addr *ha = NULL;
1883 netdev_err(ndev, "hnae handle is null\n");
1887 if (h->dev->ops->clr_mc_addr)
1888 if (h->dev->ops->clr_mc_addr(h))
1889 netdev_err(ndev, "clear multicast address fail\n");
1891 if (h->dev->ops->set_mc_addr) {
1892 netdev_for_each_mc_addr(ha, ndev)
1893 if (h->dev->ops->set_mc_addr(h, ha->addr))
1894 netdev_err(ndev, "set multicast fail\n");
1898 static void hns_nic_set_rx_mode(struct net_device *ndev)
1900 struct hns_nic_priv *priv = netdev_priv(ndev);
1901 struct hnae_handle *h = priv->ae_handle;
1903 if (h->dev->ops->set_promisc_mode) {
1904 if (ndev->flags & IFF_PROMISC)
1905 h->dev->ops->set_promisc_mode(h, 1);
1907 h->dev->ops->set_promisc_mode(h, 0);
1910 hns_set_multicast_list(ndev);
1912 if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
1913 netdev_err(ndev, "sync uc address fail\n");
1916 static void hns_nic_get_stats64(struct net_device *ndev,
1917 struct rtnl_link_stats64 *stats)
1924 struct hns_nic_priv *priv = netdev_priv(ndev);
1925 struct hnae_handle *h = priv->ae_handle;
1927 for (idx = 0; idx < h->q_num; idx++) {
1928 tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
1929 tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
1930 rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
1931 rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
1934 stats->tx_bytes = tx_bytes;
1935 stats->tx_packets = tx_pkts;
1936 stats->rx_bytes = rx_bytes;
1937 stats->rx_packets = rx_pkts;
1939 stats->rx_errors = ndev->stats.rx_errors;
1940 stats->multicast = ndev->stats.multicast;
1941 stats->rx_length_errors = ndev->stats.rx_length_errors;
1942 stats->rx_crc_errors = ndev->stats.rx_crc_errors;
1943 stats->rx_missed_errors = ndev->stats.rx_missed_errors;
1945 stats->tx_errors = ndev->stats.tx_errors;
1946 stats->rx_dropped = ndev->stats.rx_dropped;
1947 stats->tx_dropped = ndev->stats.tx_dropped;
1948 stats->collisions = ndev->stats.collisions;
1949 stats->rx_over_errors = ndev->stats.rx_over_errors;
1950 stats->rx_frame_errors = ndev->stats.rx_frame_errors;
1951 stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
1952 stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
1953 stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
1954 stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
1955 stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
1956 stats->tx_window_errors = ndev->stats.tx_window_errors;
1957 stats->rx_compressed = ndev->stats.rx_compressed;
1958 stats->tx_compressed = ndev->stats.tx_compressed;
1962 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
1963 struct net_device *sb_dev)
1965 struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
1966 struct hns_nic_priv *priv = netdev_priv(ndev);
1968 /* work around hardware queue loopback of broadcast/multicast packets */
1969 if (!AE_IS_VER1(priv->enet_ver) &&
1970 is_multicast_ether_addr(eth_hdr->h_dest))
1973 return netdev_pick_tx(ndev, skb, NULL);
1976 static const struct net_device_ops hns_nic_netdev_ops = {
1977 .ndo_open = hns_nic_net_open,
1978 .ndo_stop = hns_nic_net_stop,
1979 .ndo_start_xmit = hns_nic_net_xmit,
1980 .ndo_tx_timeout = hns_nic_net_timeout,
1981 .ndo_set_mac_address = hns_nic_net_set_mac_address,
1982 .ndo_change_mtu = hns_nic_change_mtu,
1983 .ndo_do_ioctl = hns_nic_do_ioctl,
1984 .ndo_set_features = hns_nic_set_features,
1985 .ndo_fix_features = hns_nic_fix_features,
1986 .ndo_get_stats64 = hns_nic_get_stats64,
1987 .ndo_set_rx_mode = hns_nic_set_rx_mode,
1988 .ndo_select_queue = hns_nic_select_queue,
1991 static void hns_nic_update_link_status(struct net_device *netdev)
1993 struct hns_nic_priv *priv = netdev_priv(netdev);
1995 struct hnae_handle *h = priv->ae_handle;
1998 if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
2001 (void)genphy_read_status(h->phy_dev);
2003 hns_nic_adjust_link(netdev);
2006 /* for dumping key registers */
2007 static void hns_nic_dump(struct hns_nic_priv *priv)
2009 struct hnae_handle *h = priv->ae_handle;
2010 struct hnae_ae_ops *ops = h->dev->ops;
2011 u32 *data, reg_num, i;
2013 if (ops->get_regs_len && ops->get_regs) {
2014 reg_num = ops->get_regs_len(priv->ae_handle);
2015 reg_num = (reg_num + 3ul) & ~3ul;
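/* round the register count up to a multiple of 4 so the dump loop below can
 * safely print four registers per line
 */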
2016 data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
2018 ops->get_regs(priv->ae_handle, data);
2019 for (i = 0; i < reg_num; i += 4)
2020 pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
2021 i, data[i], data[i + 1],
2022 data[i + 2], data[i + 3]);
2027 for (i = 0; i < h->q_num; i++) {
2028 pr_info("tx_queue%d_next_to_clean:%d\n",
2029 i, h->qs[i]->tx_ring.next_to_clean);
2030 pr_info("tx_queue%d_next_to_use:%d\n",
2031 i, h->qs[i]->tx_ring.next_to_use);
2032 pr_info("rx_queue%d_next_to_clean:%d\n",
2033 i, h->qs[i]->rx_ring.next_to_clean);
2034 pr_info("rx_queue%d_next_to_use:%d\n",
2035 i, h->qs[i]->rx_ring.next_to_use);
2039 /* for resetting subtask */
2040 static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
2042 enum hnae_port_type type = priv->ae_handle->port_type;
2044 if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
2046 clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
2048 /* If we're already down, removing or resetting, just bail */
2049 if (test_bit(NIC_STATE_DOWN, &priv->state) ||
2050 test_bit(NIC_STATE_REMOVING, &priv->state) ||
2051 test_bit(NIC_STATE_RESETTING, &priv->state))
2055 netdev_info(priv->netdev, "try to reset %s port!\n",
2056 (type == HNAE_PORT_DEBUG ? "debug" : "service"));
2059 /* put off any impending NetWatchDogTimeout */
2060 netif_trans_update(priv->netdev);
2061 hns_nic_net_reinit(priv->netdev);
2066 /* mark the service task as complete */
2067 static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
2069 WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
2070 /* make sure to commit the things */
2071 smp_mb__before_atomic();
2072 clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
2075 static void hns_nic_service_task(struct work_struct *work)
2077 struct hns_nic_priv *priv
2078 = container_of(work, struct hns_nic_priv, service_task);
2079 struct hnae_handle *h = priv->ae_handle;
2081 hns_nic_reset_subtask(priv);
2082 hns_nic_update_link_status(priv->netdev);
2083 h->dev->ops->update_led_status(h);
2084 hns_nic_update_stats(priv->netdev);
2086 hns_nic_service_event_complete(priv);
2089 static void hns_nic_task_schedule(struct hns_nic_priv *priv)
2091 if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
2092 !test_bit(NIC_STATE_REMOVING, &priv->state) &&
2093 !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
2094 (void)schedule_work(&priv->service_task);
2097 static void hns_nic_service_timer(struct timer_list *t)
2099 struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
2101 (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
2103 hns_nic_task_schedule(priv);
2107 * hns_tx_timeout_reset - initiate reset due to Tx timeout
2108 * @priv: driver private struct
2110 static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
2112 /* Do the reset outside of interrupt context */
2113 if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
2114 set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
2115 netdev_warn(priv->netdev,
2116 "initiating reset due to tx timeout(%llu,0x%lx)\n",
2117 priv->tx_timeout_count, priv->state);
2118 priv->tx_timeout_count++;
2119 hns_nic_task_schedule(priv);
2123 static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2125 struct hnae_handle *h = priv->ae_handle;
2126 struct hns_nic_ring_data *rd;
2127 bool is_ver1 = AE_IS_VER1(priv->enet_ver);
2130 if (h->q_num > NIC_MAX_Q_PER_VF) {
2131 netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
2135 priv->ring_data = kzalloc(array3_size(h->q_num,
2136 sizeof(*priv->ring_data), 2),
2138 if (!priv->ring_data)
2141 for (i = 0; i < h->q_num; i++) {
2142 rd = &priv->ring_data[i];
2143 rd->queue_index = i;
2144 rd->ring = &h->qs[i]->tx_ring;
2145 rd->poll_one = hns_nic_tx_poll_one;
2146 rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
2147 hns_nic_tx_fini_pro_v2;
2149 netif_napi_add(priv->netdev, &rd->napi,
2150 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2151 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2153 for (i = h->q_num; i < h->q_num * 2; i++) {
2154 rd = &priv->ring_data[i];
2155 rd->queue_index = i - h->q_num;
2156 rd->ring = &h->qs[i - h->q_num]->rx_ring;
2157 rd->poll_one = hns_nic_rx_poll_one;
2158 rd->ex_process = hns_nic_rx_up_pro;
2159 rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
2160 hns_nic_rx_fini_pro_v2;
2162 netif_napi_add(priv->netdev, &rd->napi,
2163 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2164 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2170 static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
2172 struct hnae_handle *h = priv->ae_handle;
2175 for (i = 0; i < h->q_num * 2; i++) {
2176 netif_napi_del(&priv->ring_data[i].napi);
2177 if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
2178 (void)irq_set_affinity_hint(
2179 priv->ring_data[i].ring->irq,
2181 free_irq(priv->ring_data[i].ring->irq,
2182 &priv->ring_data[i]);
2185 priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2187 kfree(priv->ring_data);
2190 static void hns_nic_set_priv_ops(struct net_device *netdev)
2192 struct hns_nic_priv *priv = netdev_priv(netdev);
2193 struct hnae_handle *h = priv->ae_handle;
2195 if (AE_IS_VER1(priv->enet_ver)) {
2196 priv->ops.fill_desc = fill_desc;
2197 priv->ops.get_rxd_bnum = get_rx_desc_bnum;
2198 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
2200 priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
2201 if ((netdev->features & NETIF_F_TSO) ||
2202 (netdev->features & NETIF_F_TSO6)) {
2203 priv->ops.fill_desc = fill_tso_desc;
2204 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
2205 /* This chip only supports 7*4096 */
2206 netif_set_gso_max_size(netdev, 7 * 4096);
2208 priv->ops.fill_desc = fill_v2_desc;
2209 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
2211 /* enable tso at init time;
2212 * control tso on/off through the TSE bit in the bd
2214 h->dev->ops->set_tso_stats(h, 1);
2218 static int hns_nic_try_get_ae(struct net_device *ndev)
2220 struct hns_nic_priv *priv = netdev_priv(ndev);
2221 struct hnae_handle *h;
2224 h = hnae_get_handle(&priv->netdev->dev,
2225 priv->fwnode, priv->port_id, NULL);
2226 if (IS_ERR_OR_NULL(h)) {
2228 dev_dbg(priv->dev, "no handle yet, register notifier!\n");
2231 priv->ae_handle = h;
2233 ret = hns_nic_init_phy(ndev, h);
2235 dev_err(priv->dev, "probe phy device fail!\n");
2239 ret = hns_nic_init_ring_data(priv);
2242 goto out_init_ring_data;
2245 hns_nic_set_priv_ops(ndev);
2247 ret = register_netdev(ndev);
2249 dev_err(priv->dev, "probe register netdev fail!\n");
2250 goto out_reg_ndev_fail;
2255 hns_nic_uninit_ring_data(priv);
2256 priv->ring_data = NULL;
2259 hnae_put_handle(priv->ae_handle);
2260 priv->ae_handle = NULL;
2265 static int hns_nic_notifier_action(struct notifier_block *nb,
2266 unsigned long action, void *data)
2268 struct hns_nic_priv *priv =
2269 container_of(nb, struct hns_nic_priv, notifier_block);
2271 assert(action == HNAE_AE_REGISTER);
2273 if (!hns_nic_try_get_ae(priv->netdev)) {
2274 hnae_unregister_notifier(&priv->notifier_block);
2275 priv->notifier_block.notifier_call = NULL;
2280 static int hns_nic_dev_probe(struct platform_device *pdev)
2282 struct device *dev = &pdev->dev;
2283 struct net_device *ndev;
2284 struct hns_nic_priv *priv;
2288 ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
2292 platform_set_drvdata(pdev, ndev);
2294 priv = netdev_priv(ndev);
2296 priv->netdev = ndev;
2298 if (dev_of_node(dev)) {
2299 struct device_node *ae_node;
2301 if (of_device_is_compatible(dev->of_node,
2302 "hisilicon,hns-nic-v1"))
2303 priv->enet_ver = AE_VERSION_1;
2305 priv->enet_ver = AE_VERSION_2;
2307 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
2310 dev_err(dev, "not find ae-handle\n");
2311 goto out_read_prop_fail;
2313 priv->fwnode = &ae_node->fwnode;
2314 } else if (is_acpi_node(dev->fwnode)) {
2315 struct fwnode_reference_args args;
2317 if (acpi_dev_found(hns_enet_acpi_match[0].id))
2318 priv->enet_ver = AE_VERSION_1;
2319 else if (acpi_dev_found(hns_enet_acpi_match[1].id))
2320 priv->enet_ver = AE_VERSION_2;
2324 /* try to find port-idx-in-ae first */
2325 ret = acpi_node_get_property_reference(dev->fwnode,
2326 "ae-handle", 0, &args);
2328 dev_err(dev, "not find ae-handle\n");
2329 goto out_read_prop_fail;
2331 if (!is_acpi_device_node(args.fwnode)) {
2333 goto out_read_prop_fail;
2335 priv->fwnode = args.fwnode;
2337 dev_err(dev, "cannot read cfg data from OF or acpi\n");
2341 ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
2343 /* only for compatibility with old code */
2344 ret = device_property_read_u32(dev, "port-id", &port_id);
2346 goto out_read_prop_fail;
2347 /* for old dts, we need to calculate the port offset */
2348 port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
2349 : port_id - HNS_SRV_OFFSET;
2351 priv->port_id = port_id;
2353 hns_init_mac_addr(ndev);
2355 ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
2356 ndev->priv_flags |= IFF_UNICAST_FLT;
2357 ndev->netdev_ops = &hns_nic_netdev_ops;
2358 hns_ethtool_set_ops(ndev);
2360 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2361 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2363 ndev->vlan_features |=
2364 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
2365 ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
2367 /* MTU range: 68 - 9578 (v1) or 9706 (v2) */
2368 ndev->min_mtu = MAC_MIN_MTU;
2369 switch (priv->enet_ver) {
2371 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
2372 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2373 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2374 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
2375 ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
2376 ndev->max_mtu = MAC_MAX_MTU_V2 -
2377 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2380 ndev->max_mtu = MAC_MAX_MTU -
2381 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2385 SET_NETDEV_DEV(ndev, dev);
2387 if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
2388 dev_dbg(dev, "set mask to 64bit\n");
2390 dev_err(dev, "set mask to 64bit fail!\n");
2392 /* carrier off reporting is important to ethtool even BEFORE open */
2393 netif_carrier_off(ndev);
2395 timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
2396 INIT_WORK(&priv->service_task, hns_nic_service_task);
2398 set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
2399 clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
2400 set_bit(NIC_STATE_DOWN, &priv->state);
2402 if (hns_nic_try_get_ae(priv->netdev)) {
2403 priv->notifier_block.notifier_call = hns_nic_notifier_action;
2404 ret = hnae_register_notifier(&priv->notifier_block);
2406 dev_err(dev, "register notifier fail!\n");
2407 goto out_notify_fail;
2409 dev_dbg(dev, "no handle yet, register notifier!\n");
2415 (void)cancel_work_sync(&priv->service_task);
2417 /* safe for ACPI FW */
2418 of_node_put(to_of_node(priv->fwnode));
2423 static int hns_nic_dev_remove(struct platform_device *pdev)
2425 struct net_device *ndev = platform_get_drvdata(pdev);
2426 struct hns_nic_priv *priv = netdev_priv(ndev);
2428 if (ndev->reg_state != NETREG_UNINITIALIZED)
2429 unregister_netdev(ndev);
2431 if (priv->ring_data)
2432 hns_nic_uninit_ring_data(priv);
2433 priv->ring_data = NULL;
2436 phy_disconnect(ndev->phydev);
2438 if (!IS_ERR_OR_NULL(priv->ae_handle))
2439 hnae_put_handle(priv->ae_handle);
2440 priv->ae_handle = NULL;
2441 if (priv->notifier_block.notifier_call)
2442 hnae_unregister_notifier(&priv->notifier_block);
2443 priv->notifier_block.notifier_call = NULL;
2445 set_bit(NIC_STATE_REMOVING, &priv->state);
2446 (void)cancel_work_sync(&priv->service_task);
2448 /* safe for ACPI FW */
2449 of_node_put(to_of_node(priv->fwnode));
2455 static const struct of_device_id hns_enet_of_match[] = {
2456 {.compatible = "hisilicon,hns-nic-v1",},
2457 {.compatible = "hisilicon,hns-nic-v2",},
2461 MODULE_DEVICE_TABLE(of, hns_enet_of_match);
2463 static struct platform_driver hns_nic_dev_driver = {
2466 .of_match_table = hns_enet_of_match,
2467 .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
2469 .probe = hns_nic_dev_probe,
2470 .remove = hns_nic_dev_remove,
2473 module_platform_driver(hns_nic_dev_driver);
2475 MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
2476 MODULE_AUTHOR("Hisilicon, Inc.");
2477 MODULE_LICENSE("GPL");
2478 MODULE_ALIAS("platform:hns-nic");