net: hns3: Add reset handle for flow director
[linux.git] drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #include <linux/if_vlan.h>
8 #include <linux/ip.h>
9 #include <linux/ipv6.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/skbuff.h>
13 #include <linux/sctp.h>
14 #include <linux/vermagic.h>
15 #include <net/gre.h>
16 #include <net/pkt_cls.h>
17 #include <net/vxlan.h>
18
19 #include "hnae3.h"
20 #include "hns3_enet.h"
21
22 static void hns3_clear_all_ring(struct hnae3_handle *h);
23 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
24
25 static const char hns3_driver_name[] = "hns3";
26 const char hns3_driver_version[] = VERMAGIC_STRING;
27 static const char hns3_driver_string[] =
28                         "Hisilicon Ethernet Network Driver for Hip08 Family";
29 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
30 static struct hnae3_client client;
31
32 /* hns3_pci_tbl - PCI Device ID Table
33  *
34  * Last entry must be all 0s
35  *
36  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
37  *   Class, Class Mask, private data (not used) }
38  */
39 static const struct pci_device_id hns3_pci_tbl[] = {
40         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
41         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
43          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
45          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
47          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
49          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
51          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
53         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
54          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
55         /* required last entry */
56         {0, }
57 };
58 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
59
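/* Per-vector interrupt handler: all work is deferred to NAPI polling */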
60 static irqreturn_t hns3_irq_handle(int irq, void *vector)
61 {
62         struct hns3_enet_tqp_vector *tqp_vector = vector;
63
64         napi_schedule(&tqp_vector->napi);
65
66         return IRQ_HANDLED;
67 }
68
69 /* This callback function is used to record irq affinity changes in the
70  * vector's affinity mask when irq_set_affinity_notifier() is used.
71  */
72 static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
73                                          const cpumask_t *mask)
74 {
75         struct hns3_enet_tqp_vector *tqp_vectors =
76                 container_of(notify, struct hns3_enet_tqp_vector,
77                              affinity_notify);
78
79         tqp_vectors->affinity_mask = *mask;
80 }
81
82 static void hns3_nic_irq_affinity_release(struct kref *ref)
83 {
84 }
85
86 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
87 {
88         struct hns3_enet_tqp_vector *tqp_vectors;
89         unsigned int i;
90
91         for (i = 0; i < priv->vector_num; i++) {
92                 tqp_vectors = &priv->tqp_vector[i];
93
94                 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
95                         continue;
96
97                 /* clear the affinity notifier and affinity mask */
98                 irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
99                 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
100
101                 /* release the irq resource */
102                 free_irq(tqp_vectors->vector_irq, tqp_vectors);
103                 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
104         }
105 }
106
107 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
108 {
109         struct hns3_enet_tqp_vector *tqp_vectors;
110         int txrx_int_idx = 0;
111         int rx_int_idx = 0;
112         int tx_int_idx = 0;
113         unsigned int i;
114         int ret;
115
116         for (i = 0; i < priv->vector_num; i++) {
117                 tqp_vectors = &priv->tqp_vector[i];
118
119                 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
120                         continue;
121
122                 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
123                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
124                                  "%s-%s-%d", priv->netdev->name, "TxRx",
125                                  txrx_int_idx++);
126                         txrx_int_idx++;
127                 } else if (tqp_vectors->rx_group.ring) {
128                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
129                                  "%s-%s-%d", priv->netdev->name, "Rx",
130                                  rx_int_idx++);
131                 } else if (tqp_vectors->tx_group.ring) {
132                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
133                                  "%s-%s-%d", priv->netdev->name, "Tx",
134                                  tx_int_idx++);
135                 } else {
136                         /* Skip this unused tqp_vector */
137                         continue;
138                 }
139
140                 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
141
142                 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
143                                   tqp_vectors->name,
144                                   tqp_vectors);
145                 if (ret) {
146                         netdev_err(priv->netdev, "request irq(%d) fail\n",
147                                    tqp_vectors->vector_irq);
148                         return ret;
149                 }
150
151                 tqp_vectors->affinity_notify.notify =
152                                         hns3_nic_irq_affinity_notify;
153                 tqp_vectors->affinity_notify.release =
154                                         hns3_nic_irq_affinity_release;
155                 irq_set_affinity_notifier(tqp_vectors->vector_irq,
156                                           &tqp_vectors->affinity_notify);
157                 irq_set_affinity_hint(tqp_vectors->vector_irq,
158                                       &tqp_vectors->affinity_mask);
159
160                 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
161         }
162
163         return 0;
164 }
165
166 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
167                                  u32 mask_en)
168 {
169         writel(mask_en, tqp_vector->mask_addr);
170 }
171
172 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
173 {
174         napi_enable(&tqp_vector->napi);
175
176         /* enable vector */
177         hns3_mask_vector_irq(tqp_vector, 1);
178 }
179
180 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
181 {
182         /* disable vector */
183         hns3_mask_vector_irq(tqp_vector, 0);
184
185         disable_irq(tqp_vector->vector_irq);
186         napi_disable(&tqp_vector->napi);
187 }
188
189 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
190                                  u32 rl_value)
191 {
192         u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
193
194         /* This defines the configuration for RL (Interrupt Rate Limiter).
195          * RL defines the rate of interrupts, i.e. the number of interrupts per second.
196          * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
197          */
198
199         if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
200             !tqp_vector->rx_group.coal.gl_adapt_enable)
201                 /* According to the hardware, the range of rl_reg is
202                  * 0-59 and the unit is 4.
203                  */
204                 rl_reg |=  HNS3_INT_RL_ENABLE_MASK;
205
206         writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
207 }
208
209 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
210                                     u32 gl_value)
211 {
212         u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
213
214         writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
215 }
216
217 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
218                                     u32 gl_value)
219 {
220         u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
221
222         writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
223 }
224
225 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
226                                    struct hns3_nic_priv *priv)
227 {
228         /* initialize the configuration for interrupt coalescing.
229          * 1. GL (Interrupt Gap Limiter)
230          * 2. RL (Interrupt Rate Limiter)
231          */
232
233         /* Default: enable interrupt coalescing self-adaptive and GL */
234         tqp_vector->tx_group.coal.gl_adapt_enable = 1;
235         tqp_vector->rx_group.coal.gl_adapt_enable = 1;
236
237         tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
238         tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
239
240         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
241         tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
242         tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
243 }
244
245 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
246                                       struct hns3_nic_priv *priv)
247 {
248         struct hnae3_handle *h = priv->ae_handle;
249
250         hns3_set_vector_coalesce_tx_gl(tqp_vector,
251                                        tqp_vector->tx_group.coal.int_gl);
252         hns3_set_vector_coalesce_rx_gl(tqp_vector,
253                                        tqp_vector->rx_group.coal.int_gl);
254         hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
255 }
256
257 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
258 {
259         struct hnae3_handle *h = hns3_get_handle(netdev);
260         struct hnae3_knic_private_info *kinfo = &h->kinfo;
261         unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
262         int i, ret;
263
264         if (kinfo->num_tc <= 1) {
265                 netdev_reset_tc(netdev);
266         } else {
267                 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
268                 if (ret) {
269                         netdev_err(netdev,
270                                    "netdev_set_num_tc fail, ret=%d!\n", ret);
271                         return ret;
272                 }
273
274                 for (i = 0; i < HNAE3_MAX_TC; i++) {
275                         if (!kinfo->tc_info[i].enable)
276                                 continue;
277
278                         netdev_set_tc_queue(netdev,
279                                             kinfo->tc_info[i].tc,
280                                             kinfo->tc_info[i].tqp_count,
281                                             kinfo->tc_info[i].tqp_offset);
282                 }
283         }
284
285         ret = netif_set_real_num_tx_queues(netdev, queue_size);
286         if (ret) {
287                 netdev_err(netdev,
288                            "netif_set_real_num_tx_queues fail, ret=%d!\n",
289                            ret);
290                 return ret;
291         }
292
293         ret = netif_set_real_num_rx_queues(netdev, queue_size);
294         if (ret) {
295                 netdev_err(netdev,
296                            "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
297                 return ret;
298         }
299
300         return 0;
301 }
302
303 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
304 {
305         u16 alloc_tqps, max_rss_size, rss_size;
306
307         h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
308         rss_size = alloc_tqps / h->kinfo.num_tc;
309
310         return min_t(u16, rss_size, max_rss_size);
311 }
312
313 static int hns3_nic_net_up(struct net_device *netdev)
314 {
315         struct hns3_nic_priv *priv = netdev_priv(netdev);
316         struct hnae3_handle *h = priv->ae_handle;
317         int i, j;
318         int ret;
319
320         ret = hns3_nic_reset_all_ring(h);
321         if (ret)
322                 return ret;
323
324         /* get irq resource for all vectors */
325         ret = hns3_nic_init_irq(priv);
326         if (ret) {
327                 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
328                 return ret;
329         }
330
331         /* enable the vectors */
332         for (i = 0; i < priv->vector_num; i++)
333                 hns3_vector_enable(&priv->tqp_vector[i]);
334
335         /* start the ae_dev */
336         ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
337         if (ret)
338                 goto out_start_err;
339
340         clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
341
342         return 0;
343
344 out_start_err:
345         for (j = i - 1; j >= 0; j--)
346                 hns3_vector_disable(&priv->tqp_vector[j]);
347
348         hns3_nic_uninit_irq(priv);
349
350         return ret;
351 }
352
353 static int hns3_nic_net_open(struct net_device *netdev)
354 {
355         struct hns3_nic_priv *priv = netdev_priv(netdev);
356         struct hnae3_handle *h = hns3_get_handle(netdev);
357         struct hnae3_knic_private_info *kinfo;
358         int i, ret;
359
360         netif_carrier_off(netdev);
361
362         ret = hns3_nic_set_real_num_queue(netdev);
363         if (ret)
364                 return ret;
365
366         ret = hns3_nic_net_up(netdev);
367         if (ret) {
368                 netdev_err(netdev,
369                            "hns net up fail, ret=%d!\n", ret);
370                 return ret;
371         }
372
373         kinfo = &h->kinfo;
374         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
375                 netdev_set_prio_tc_map(netdev, i,
376                                        kinfo->prio_tc[i]);
377         }
378
379         priv->ae_handle->last_reset_time = jiffies;
380         return 0;
381 }
382
383 static void hns3_nic_net_down(struct net_device *netdev)
384 {
385         struct hns3_nic_priv *priv = netdev_priv(netdev);
386         const struct hnae3_ae_ops *ops;
387         int i;
388
389         if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
390                 return;
391
392         /* disable vectors */
393         for (i = 0; i < priv->vector_num; i++)
394                 hns3_vector_disable(&priv->tqp_vector[i]);
395
396         /* stop ae_dev */
397         ops = priv->ae_handle->ae_algo->ops;
398         if (ops->stop)
399                 ops->stop(priv->ae_handle);
400
401         /* free irq resources */
402         hns3_nic_uninit_irq(priv);
403
404         hns3_clear_all_ring(priv->ae_handle);
405 }
406
407 static int hns3_nic_net_stop(struct net_device *netdev)
408 {
409         netif_tx_stop_all_queues(netdev);
410         netif_carrier_off(netdev);
411
412         hns3_nic_net_down(netdev);
413
414         return 0;
415 }
416
417 static int hns3_nic_uc_sync(struct net_device *netdev,
418                             const unsigned char *addr)
419 {
420         struct hnae3_handle *h = hns3_get_handle(netdev);
421
422         if (h->ae_algo->ops->add_uc_addr)
423                 return h->ae_algo->ops->add_uc_addr(h, addr);
424
425         return 0;
426 }
427
428 static int hns3_nic_uc_unsync(struct net_device *netdev,
429                               const unsigned char *addr)
430 {
431         struct hnae3_handle *h = hns3_get_handle(netdev);
432
433         if (h->ae_algo->ops->rm_uc_addr)
434                 return h->ae_algo->ops->rm_uc_addr(h, addr);
435
436         return 0;
437 }
438
439 static int hns3_nic_mc_sync(struct net_device *netdev,
440                             const unsigned char *addr)
441 {
442         struct hnae3_handle *h = hns3_get_handle(netdev);
443
444         if (h->ae_algo->ops->add_mc_addr)
445                 return h->ae_algo->ops->add_mc_addr(h, addr);
446
447         return 0;
448 }
449
450 static int hns3_nic_mc_unsync(struct net_device *netdev,
451                               const unsigned char *addr)
452 {
453         struct hnae3_handle *h = hns3_get_handle(netdev);
454
455         if (h->ae_algo->ops->rm_mc_addr)
456                 return h->ae_algo->ops->rm_mc_addr(h, addr);
457
458         return 0;
459 }
460
461 static void hns3_nic_set_rx_mode(struct net_device *netdev)
462 {
463         struct hnae3_handle *h = hns3_get_handle(netdev);
464
465         if (h->ae_algo->ops->set_promisc_mode) {
466                 if (netdev->flags & IFF_PROMISC)
467                         h->ae_algo->ops->set_promisc_mode(h, true, true);
468                 else if (netdev->flags & IFF_ALLMULTI)
469                         h->ae_algo->ops->set_promisc_mode(h, false, true);
470                 else
471                         h->ae_algo->ops->set_promisc_mode(h, false, false);
472         }
473         if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
474                 netdev_err(netdev, "sync uc address fail\n");
475         if (netdev->flags & IFF_MULTICAST) {
476                 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
477                         netdev_err(netdev, "sync mc address fail\n");
478
479                 if (h->ae_algo->ops->update_mta_status)
480                         h->ae_algo->ops->update_mta_status(h);
481         }
482 }
483
484 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
485                         u16 *mss, u32 *type_cs_vlan_tso)
486 {
487         u32 l4_offset, hdr_len;
488         union l3_hdr_info l3;
489         union l4_hdr_info l4;
490         u32 l4_paylen;
491         int ret;
492
493         if (!skb_is_gso(skb))
494                 return 0;
495
496         ret = skb_cow_head(skb, 0);
497         if (ret)
498                 return ret;
499
500         l3.hdr = skb_network_header(skb);
501         l4.hdr = skb_transport_header(skb);
502
503         /* Software should clear the IPv4's checksum field when tso is
504          * needed.
505          */
506         if (l3.v4->version == 4)
507                 l3.v4->check = 0;
508
509         /* tunnel packet.*/
510         if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
511                                          SKB_GSO_GRE_CSUM |
512                                          SKB_GSO_UDP_TUNNEL |
513                                          SKB_GSO_UDP_TUNNEL_CSUM)) {
514                 if ((!(skb_shinfo(skb)->gso_type &
515                     SKB_GSO_PARTIAL)) &&
516                     (skb_shinfo(skb)->gso_type &
517                     SKB_GSO_UDP_TUNNEL_CSUM)) {
518                         /* Software should clear the udp's checksum
519                          * field when tso is needed.
520                          */
521                         l4.udp->check = 0;
522                 }
523                 /* reset l3&l4 pointers from outer to inner headers */
524                 l3.hdr = skb_inner_network_header(skb);
525                 l4.hdr = skb_inner_transport_header(skb);
526
527                 /* Software should clear the IPv4's checksum field when
528                  * tso is needed.
529                  */
530                 if (l3.v4->version == 4)
531                         l3.v4->check = 0;
532         }
533
534         /* normal or tunnel packet*/
535         l4_offset = l4.hdr - skb->data;
536         hdr_len = (l4.tcp->doff * 4) + l4_offset;
537
538         /* remove payload length from inner pseudo checksum when tso*/
539         l4_paylen = skb->len - l4_offset;
540         csum_replace_by_diff(&l4.tcp->check,
541                              (__force __wsum)htonl(l4_paylen));
542
543         /* find the txbd field values */
544         *paylen = skb->len - hdr_len;
545         hnae3_set_bit(*type_cs_vlan_tso,
546                       HNS3_TXD_TSO_B, 1);
547
548         /* get MSS for TSO */
549         *mss = skb_shinfo(skb)->gso_size;
550
551         return 0;
552 }
553
554 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
555                                 u8 *il4_proto)
556 {
557         union {
558                 struct iphdr *v4;
559                 struct ipv6hdr *v6;
560                 unsigned char *hdr;
561         } l3;
562         unsigned char *l4_hdr;
563         unsigned char *exthdr;
564         u8 l4_proto_tmp;
565         __be16 frag_off;
566
567         /* find outer header point */
568         l3.hdr = skb_network_header(skb);
569         l4_hdr = skb_transport_header(skb);
570
571         if (skb->protocol == htons(ETH_P_IPV6)) {
572                 exthdr = l3.hdr + sizeof(*l3.v6);
573                 l4_proto_tmp = l3.v6->nexthdr;
574                 if (l4_hdr != exthdr)
575                         ipv6_skip_exthdr(skb, exthdr - skb->data,
576                                          &l4_proto_tmp, &frag_off);
577         } else if (skb->protocol == htons(ETH_P_IP)) {
578                 l4_proto_tmp = l3.v4->protocol;
579         } else {
580                 return -EINVAL;
581         }
582
583         *ol4_proto = l4_proto_tmp;
584
585         /* tunnel packet */
586         if (!skb->encapsulation) {
587                 *il4_proto = 0;
588                 return 0;
589         }
590
591         /* find inner header point */
592         l3.hdr = skb_inner_network_header(skb);
593         l4_hdr = skb_inner_transport_header(skb);
594
595         if (l3.v6->version == 6) {
596                 exthdr = l3.hdr + sizeof(*l3.v6);
597                 l4_proto_tmp = l3.v6->nexthdr;
598                 if (l4_hdr != exthdr)
599                         ipv6_skip_exthdr(skb, exthdr - skb->data,
600                                          &l4_proto_tmp, &frag_off);
601         } else if (l3.v4->version == 4) {
602                 l4_proto_tmp = l3.v4->protocol;
603         }
604
605         *il4_proto = l4_proto_tmp;
606
607         return 0;
608 }
609
610 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
611                                 u8 il4_proto, u32 *type_cs_vlan_tso,
612                                 u32 *ol_type_vlan_len_msec)
613 {
614         union {
615                 struct iphdr *v4;
616                 struct ipv6hdr *v6;
617                 unsigned char *hdr;
618         } l3;
619         union {
620                 struct tcphdr *tcp;
621                 struct udphdr *udp;
622                 struct gre_base_hdr *gre;
623                 unsigned char *hdr;
624         } l4;
625         unsigned char *l2_hdr;
626         u8 l4_proto = ol4_proto;
627         u32 ol2_len;
628         u32 ol3_len;
629         u32 ol4_len;
630         u32 l2_len;
631         u32 l3_len;
632
633         l3.hdr = skb_network_header(skb);
634         l4.hdr = skb_transport_header(skb);
635
636         /* compute L2 header size for normal packet, defined in 2 Bytes */
637         l2_len = l3.hdr - skb->data;
638         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
639                         HNS3_TXD_L2LEN_S, l2_len >> 1);
640
641         /* tunnel packet*/
642         if (skb->encapsulation) {
643                 /* compute OL2 header size, defined in 2 Bytes */
644                 ol2_len = l2_len;
645                 hnae3_set_field(*ol_type_vlan_len_msec,
646                                 HNS3_TXD_L2LEN_M,
647                                 HNS3_TXD_L2LEN_S, ol2_len >> 1);
648
649                 /* compute OL3 header size, defined in 4 Bytes */
650                 ol3_len = l4.hdr - l3.hdr;
651                 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
652                                 HNS3_TXD_L3LEN_S, ol3_len >> 2);
653
654                 /* MAC in UDP, MAC in GRE (0x6558)*/
655                 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
656                         /* switch MAC header ptr from outer to inner header.*/
657                         l2_hdr = skb_inner_mac_header(skb);
658
659                         /* compute OL4 header size, defined in 4 Bytes. */
660                         ol4_len = l2_hdr - l4.hdr;
661                         hnae3_set_field(*ol_type_vlan_len_msec,
662                                         HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
663                                         ol4_len >> 2);
664
665                         /* switch IP header ptr from outer to inner header */
666                         l3.hdr = skb_inner_network_header(skb);
667
668                         /* compute inner l2 header size, defined in 2 Bytes. */
669                         l2_len = l3.hdr - l2_hdr;
670                         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
671                                         HNS3_TXD_L2LEN_S, l2_len >> 1);
672                 } else {
673                         /* skb packet types not supported by hardware;
674                          * the txbd length field is not filled.
675                          */
676                         return;
677                 }
678
679                 /* switch L4 header pointer from outer to inner */
680                 l4.hdr = skb_inner_transport_header(skb);
681
682                 l4_proto = il4_proto;
683         }
684
685         /* compute inner(/normal) L3 header size, defined in 4 Bytes */
686         l3_len = l4.hdr - l3.hdr;
687         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
688                         HNS3_TXD_L3LEN_S, l3_len >> 2);
689
690         /* compute inner(/normal) L4 header size, defined in 4 Bytes */
691         switch (l4_proto) {
692         case IPPROTO_TCP:
693                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
694                                 HNS3_TXD_L4LEN_S, l4.tcp->doff);
695                 break;
696         case IPPROTO_SCTP:
697                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
698                                 HNS3_TXD_L4LEN_S,
699                                 (sizeof(struct sctphdr) >> 2));
700                 break;
701         case IPPROTO_UDP:
702                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
703                                 HNS3_TXD_L4LEN_S,
704                                 (sizeof(struct udphdr) >> 2));
705                 break;
706         default:
707                 /* skb packet types not supported by hardware;
708                  * the txbd length field is not filled.
709                  */
710                 return;
711         }
712 }
713
714 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
715  * packet is a UDP packet whose destination port is the IANA-assigned VXLAN
716  * port, the hardware is expected to do the checksum offload, but it will
717  * not do the checksum offload when the UDP destination port is 4789, so
718  * the driver falls back to software checksumming in that case.
719  */
720 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
721 {
722 #define IANA_VXLAN_PORT 4789
723         union {
724                 struct tcphdr *tcp;
725                 struct udphdr *udp;
726                 struct gre_base_hdr *gre;
727                 unsigned char *hdr;
728         } l4;
729
730         l4.hdr = skb_transport_header(skb);
731
732         if (skb->encapsulation || l4.udp->dest != htons(IANA_VXLAN_PORT))
733                 return false;
734
735         skb_checksum_help(skb);
736
737         return true;
738 }
739
740 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
741                                    u8 il4_proto, u32 *type_cs_vlan_tso,
742                                    u32 *ol_type_vlan_len_msec)
743 {
744         union {
745                 struct iphdr *v4;
746                 struct ipv6hdr *v6;
747                 unsigned char *hdr;
748         } l3;
749         u32 l4_proto = ol4_proto;
750
751         l3.hdr = skb_network_header(skb);
752
753         /* define OL3 type and tunnel type(OL4).*/
754         if (skb->encapsulation) {
755                 /* define outer network header type.*/
756                 if (skb->protocol == htons(ETH_P_IP)) {
757                         if (skb_is_gso(skb))
758                                 hnae3_set_field(*ol_type_vlan_len_msec,
759                                                 HNS3_TXD_OL3T_M,
760                                                 HNS3_TXD_OL3T_S,
761                                                 HNS3_OL3T_IPV4_CSUM);
762                         else
763                                 hnae3_set_field(*ol_type_vlan_len_msec,
764                                                 HNS3_TXD_OL3T_M,
765                                                 HNS3_TXD_OL3T_S,
766                                                 HNS3_OL3T_IPV4_NO_CSUM);
767
768                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
769                         hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
770                                         HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
771                 }
772
773                 /* define tunnel type(OL4).*/
774                 switch (l4_proto) {
775                 case IPPROTO_UDP:
776                         hnae3_set_field(*ol_type_vlan_len_msec,
777                                         HNS3_TXD_TUNTYPE_M,
778                                         HNS3_TXD_TUNTYPE_S,
779                                         HNS3_TUN_MAC_IN_UDP);
780                         break;
781                 case IPPROTO_GRE:
782                         hnae3_set_field(*ol_type_vlan_len_msec,
783                                         HNS3_TXD_TUNTYPE_M,
784                                         HNS3_TXD_TUNTYPE_S,
785                                         HNS3_TUN_NVGRE);
786                         break;
787                 default:
788                         /* drop the skb if the hardware does not support this tunnel
789                          * type, because it cannot calculate the csum when doing TSO.
790                          */
791                         if (skb_is_gso(skb))
792                                 return -EDOM;
793
794                         /* the stack has already computed the IP header, so the
795                          * driver calculates the l4 checksum when not doing TSO.
796                          */
797                         skb_checksum_help(skb);
798                         return 0;
799                 }
800
801                 l3.hdr = skb_inner_network_header(skb);
802                 l4_proto = il4_proto;
803         }
804
805         if (l3.v4->version == 4) {
806                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
807                                 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
808
809                 /* the stack computes the IP header already, the only time we
810                  * need the hardware to recompute it is in the case of TSO.
811                  */
812                 if (skb_is_gso(skb))
813                         hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
814         } else if (l3.v6->version == 6) {
815                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
816                                 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
817         }
818
819         switch (l4_proto) {
820         case IPPROTO_TCP:
821                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
822                 hnae3_set_field(*type_cs_vlan_tso,
823                                 HNS3_TXD_L4T_M,
824                                 HNS3_TXD_L4T_S,
825                                 HNS3_L4T_TCP);
826                 break;
827         case IPPROTO_UDP:
828                 if (hns3_tunnel_csum_bug(skb))
829                         break;
830
831                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
832                 hnae3_set_field(*type_cs_vlan_tso,
833                                 HNS3_TXD_L4T_M,
834                                 HNS3_TXD_L4T_S,
835                                 HNS3_L4T_UDP);
836                 break;
837         case IPPROTO_SCTP:
838                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
839                 hnae3_set_field(*type_cs_vlan_tso,
840                                 HNS3_TXD_L4T_M,
841                                 HNS3_TXD_L4T_S,
842                                 HNS3_L4T_SCTP);
843                 break;
844         default:
845                 /* drop the skb if the hardware does not support this type of
846                  * packet, because it cannot calculate the csum when doing TSO.
847                  */
848                 if (skb_is_gso(skb))
849                         return -EDOM;
850
851                 /* the stack has already computed the IP header, so the
852                  * driver calculates the l4 checksum when not doing TSO.
853                  */
854                 skb_checksum_help(skb);
855                 return 0;
856         }
857
858         return 0;
859 }
860
861 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
862 {
863         /* Config bd buffer end */
864         hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
865                         HNS3_TXD_BDTYPE_S, 0);
866         hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
867         hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
868         hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
869 }
870
871 static int hns3_fill_desc_vtags(struct sk_buff *skb,
872                                 struct hns3_enet_ring *tx_ring,
873                                 u32 *inner_vlan_flag,
874                                 u32 *out_vlan_flag,
875                                 u16 *inner_vtag,
876                                 u16 *out_vtag)
877 {
878 #define HNS3_TX_VLAN_PRIO_SHIFT 13
879
880         if (skb->protocol == htons(ETH_P_8021Q) &&
881             !(tx_ring->tqp->handle->kinfo.netdev->features &
882             NETIF_F_HW_VLAN_CTAG_TX)) {
883                 /* When HW VLAN acceleration is turned off, and the stack
884                  * sets the protocol to 802.1Q, the driver just needs to
885                  * set the protocol to the encapsulated ethertype.
886                  */
887                 skb->protocol = vlan_get_protocol(skb);
888                 return 0;
889         }
890
891         if (skb_vlan_tag_present(skb)) {
892                 u16 vlan_tag;
893
894                 vlan_tag = skb_vlan_tag_get(skb);
895                 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
896
897                 /* Based on the hardware strategy, use out_vtag in the double
898                  * VLAN tag case and inner_vtag in the single tag case.
899                  */
900                 if (skb->protocol == htons(ETH_P_8021Q)) {
901                         hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
902                         *out_vtag = vlan_tag;
903                 } else {
904                         hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
905                         *inner_vtag = vlan_tag;
906                 }
907         } else if (skb->protocol == htons(ETH_P_8021Q)) {
908                 struct vlan_ethhdr *vhdr;
909                 int rc;
910
911                 rc = skb_cow_head(skb, 0);
912                 if (rc < 0)
913                         return rc;
914                 vhdr = (struct vlan_ethhdr *)skb->data;
915                 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
916                                         << HNS3_TX_VLAN_PRIO_SHIFT);
917         }
918
919         skb->protocol = vlan_get_protocol(skb);
920         return 0;
921 }
922
923 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
924                           int size, dma_addr_t dma, int frag_end,
925                           enum hns_desc_type type)
926 {
927         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
928         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
929         u32 ol_type_vlan_len_msec = 0;
930         u16 bdtp_fe_sc_vld_ra_ri = 0;
931         u32 type_cs_vlan_tso = 0;
932         struct sk_buff *skb;
933         u16 inner_vtag = 0;
934         u16 out_vtag = 0;
935         u32 paylen = 0;
936         u16 mss = 0;
937         u8 ol4_proto;
938         u8 il4_proto;
939         int ret;
940
941         /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
942         desc_cb->priv = priv;
943         desc_cb->length = size;
944         desc_cb->dma = dma;
945         desc_cb->type = type;
946
947         /* now, fill the descriptor */
948         desc->addr = cpu_to_le64(dma);
949         desc->tx.send_size = cpu_to_le16((u16)size);
950         hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
951         desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
952
953         if (type == DESC_TYPE_SKB) {
954                 skb = (struct sk_buff *)priv;
955                 paylen = skb->len;
956
957                 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
958                                            &ol_type_vlan_len_msec,
959                                            &inner_vtag, &out_vtag);
960                 if (unlikely(ret))
961                         return ret;
962
963                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
964                         skb_reset_mac_len(skb);
965
966                         ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
967                         if (ret)
968                                 return ret;
969                         hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
970                                             &type_cs_vlan_tso,
971                                             &ol_type_vlan_len_msec);
972                         ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
973                                                       &type_cs_vlan_tso,
974                                                       &ol_type_vlan_len_msec);
975                         if (ret)
976                                 return ret;
977
978                         ret = hns3_set_tso(skb, &paylen, &mss,
979                                            &type_cs_vlan_tso);
980                         if (ret)
981                                 return ret;
982                 }
983
984                 /* Set txbd */
985                 desc->tx.ol_type_vlan_len_msec =
986                         cpu_to_le32(ol_type_vlan_len_msec);
987                 desc->tx.type_cs_vlan_tso_len =
988                         cpu_to_le32(type_cs_vlan_tso);
989                 desc->tx.paylen = cpu_to_le32(paylen);
990                 desc->tx.mss = cpu_to_le16(mss);
991                 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
992                 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
993         }
994
995         /* move ring pointer to next.*/
996         ring_ptr_move_fw(ring, next_to_use);
997
998         return 0;
999 }
1000
1001 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
1002                               int size, dma_addr_t dma, int frag_end,
1003                               enum hns_desc_type type)
1004 {
1005         unsigned int frag_buf_num;
1006         unsigned int k;
1007         int sizeoflast;
1008         int ret;
1009
1010         frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1011         sizeoflast = size % HNS3_MAX_BD_SIZE;
1012         sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1013
1014         /* When the frag size is bigger than hardware, split this frag */
1015         for (k = 0; k < frag_buf_num; k++) {
1016                 ret = hns3_fill_desc(ring, priv,
1017                                      (k == frag_buf_num - 1) ?
1018                                 sizeoflast : HNS3_MAX_BD_SIZE,
1019                                 dma + HNS3_MAX_BD_SIZE * k,
1020                                 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
1021                                 (type == DESC_TYPE_SKB && !k) ?
1022                                         DESC_TYPE_SKB : DESC_TYPE_PAGE);
1023                 if (ret)
1024                         return ret;
1025         }
1026
1027         return 0;
1028 }
1029
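/* Count the buffer descriptors a TSO skb will need: the linear part plus each
 * fragment, split into HNS3_MAX_BD_SIZE chunks.  Returns -ENOMEM when a single
 * fragment would need more than HNS3_MAX_BD_PER_FRAG descriptors, and -EBUSY
 * when the ring does not have enough free descriptors.
 */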
1030 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1031                                    struct hns3_enet_ring *ring)
1032 {
1033         struct sk_buff *skb = *out_skb;
1034         struct skb_frag_struct *frag;
1035         int bdnum_for_frag;
1036         int frag_num;
1037         int buf_num;
1038         int size;
1039         int i;
1040
1041         size = skb_headlen(skb);
1042         buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1043
1044         frag_num = skb_shinfo(skb)->nr_frags;
1045         for (i = 0; i < frag_num; i++) {
1046                 frag = &skb_shinfo(skb)->frags[i];
1047                 size = skb_frag_size(frag);
1048                 bdnum_for_frag =
1049                         (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1050                 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
1051                         return -ENOMEM;
1052
1053                 buf_num += bdnum_for_frag;
1054         }
1055
1056         if (buf_num > ring_space(ring))
1057                 return -EBUSY;
1058
1059         *bnum = buf_num;
1060         return 0;
1061 }
1062
1063 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1064                                   struct hns3_enet_ring *ring)
1065 {
1066         struct sk_buff *skb = *out_skb;
1067         int buf_num;
1068
1069         /* No. of segments (plus a header) */
1070         buf_num = skb_shinfo(skb)->nr_frags + 1;
1071
1072         if (unlikely(ring_space(ring) < buf_num))
1073                 return -EBUSY;
1074
1075         *bnum = buf_num;
1076
1077         return 0;
1078 }
1079
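/* TX error-path helper: unmap every buffer filled since @next_to_use_orig and
 * roll the ring's next_to_use pointer back to that position.
 */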
1080 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
1081 {
1082         struct device *dev = ring_to_dev(ring);
1083         unsigned int i;
1084
1085         for (i = 0; i < ring->desc_num; i++) {
1086                 /* check if this is where we started */
1087                 if (ring->next_to_use == next_to_use_orig)
1088                         break;
1089
1090                 /* unmap the descriptor dma address */
1091                 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1092                         dma_unmap_single(dev,
1093                                          ring->desc_cb[ring->next_to_use].dma,
1094                                         ring->desc_cb[ring->next_to_use].length,
1095                                         DMA_TO_DEVICE);
1096                 else
1097                         dma_unmap_page(dev,
1098                                        ring->desc_cb[ring->next_to_use].dma,
1099                                        ring->desc_cb[ring->next_to_use].length,
1100                                        DMA_TO_DEVICE);
1101
1102                 /* rollback one */
1103                 ring_ptr_move_bw(ring, next_to_use);
1104         }
1105 }
1106
1107 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1108 {
1109         struct hns3_nic_priv *priv = netdev_priv(netdev);
1110         struct hns3_nic_ring_data *ring_data =
1111                 &tx_ring_data(priv, skb->queue_mapping);
1112         struct hns3_enet_ring *ring = ring_data->ring;
1113         struct device *dev = priv->dev;
1114         struct netdev_queue *dev_queue;
1115         struct skb_frag_struct *frag;
1116         int next_to_use_head;
1117         int next_to_use_frag;
1118         dma_addr_t dma;
1119         int buf_num;
1120         int seg_num;
1121         int size;
1122         int ret;
1123         int i;
1124
1125         /* Prefetch the data used later */
1126         prefetch(skb->data);
1127
1128         switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1129         case -EBUSY:
1130                 u64_stats_update_begin(&ring->syncp);
1131                 ring->stats.tx_busy++;
1132                 u64_stats_update_end(&ring->syncp);
1133
1134                 goto out_net_tx_busy;
1135         case -ENOMEM:
1136                 u64_stats_update_begin(&ring->syncp);
1137                 ring->stats.sw_err_cnt++;
1138                 u64_stats_update_end(&ring->syncp);
1139                 netdev_err(netdev, "no memory to xmit!\n");
1140
1141                 goto out_err_tx_ok;
1142         default:
1143                 break;
1144         }
1145
1146         /* No. of segments (plus a header) */
1147         seg_num = skb_shinfo(skb)->nr_frags + 1;
1148         /* Fill the first part */
1149         size = skb_headlen(skb);
1150
1151         next_to_use_head = ring->next_to_use;
1152
1153         dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1154         if (dma_mapping_error(dev, dma)) {
1155                 netdev_err(netdev, "TX head DMA map failed\n");
1156                 ring->stats.sw_err_cnt++;
1157                 goto out_err_tx_ok;
1158         }
1159
1160         ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1161                            DESC_TYPE_SKB);
1162         if (ret)
1163                 goto head_dma_map_err;
1164
1165         next_to_use_frag = ring->next_to_use;
1166         /* Fill the fragments */
1167         for (i = 1; i < seg_num; i++) {
1168                 frag = &skb_shinfo(skb)->frags[i - 1];
1169                 size = skb_frag_size(frag);
1170                 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1171                 if (dma_mapping_error(dev, dma)) {
1172                         netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1173                         ring->stats.sw_err_cnt++;
1174                         goto frag_dma_map_err;
1175                 }
1176                 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1177                                     seg_num - 1 == i ? 1 : 0,
1178                                     DESC_TYPE_PAGE);
1179
1180                 if (ret)
1181                         goto frag_dma_map_err;
1182         }
1183
1184         /* All buffers have been filled into descriptors; notify the stack and hardware */
1185         dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1186         netdev_tx_sent_queue(dev_queue, skb->len);
1187
1188         wmb(); /* Commit all data before submit */
1189
1190         hnae3_queue_xmit(ring->tqp, buf_num);
1191
1192         return NETDEV_TX_OK;
1193
1194 frag_dma_map_err:
1195         hns_nic_dma_unmap(ring, next_to_use_frag);
1196
1197 head_dma_map_err:
1198         hns_nic_dma_unmap(ring, next_to_use_head);
1199
1200 out_err_tx_ok:
1201         dev_kfree_skb_any(skb);
1202         return NETDEV_TX_OK;
1203
1204 out_net_tx_busy:
1205         netif_stop_subqueue(netdev, ring_data->queue_index);
1206         smp_mb(); /* Commit all data before submit */
1207
1208         return NETDEV_TX_BUSY;
1209 }
1210
1211 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1212 {
1213         struct hnae3_handle *h = hns3_get_handle(netdev);
1214         struct sockaddr *mac_addr = p;
1215         int ret;
1216
1217         if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1218                 return -EADDRNOTAVAIL;
1219
1220         if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1221                 netdev_info(netdev, "already using mac address %pM\n",
1222                             mac_addr->sa_data);
1223                 return 0;
1224         }
1225
1226         ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1227         if (ret) {
1228                 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1229                 return ret;
1230         }
1231
1232         ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1233
1234         return 0;
1235 }
1236
1237 static int hns3_nic_do_ioctl(struct net_device *netdev,
1238                              struct ifreq *ifr, int cmd)
1239 {
1240         struct hnae3_handle *h = hns3_get_handle(netdev);
1241
1242         if (!netif_running(netdev))
1243                 return -EINVAL;
1244
1245         if (!h->ae_algo->ops->do_ioctl)
1246                 return -EOPNOTSUPP;
1247
1248         return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1249 }
1250
1251 static int hns3_nic_set_features(struct net_device *netdev,
1252                                  netdev_features_t features)
1253 {
1254         netdev_features_t changed = netdev->features ^ features;
1255         struct hns3_nic_priv *priv = netdev_priv(netdev);
1256         struct hnae3_handle *h = priv->ae_handle;
1257         int ret;
1258
1259         if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1260                 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1261                         priv->ops.fill_desc = hns3_fill_desc_tso;
1262                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1263                 } else {
1264                         priv->ops.fill_desc = hns3_fill_desc;
1265                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1266                 }
1267         }
1268
1269         if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1270             h->ae_algo->ops->enable_vlan_filter) {
1271                 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1272                         h->ae_algo->ops->enable_vlan_filter(h, true);
1273                 else
1274                         h->ae_algo->ops->enable_vlan_filter(h, false);
1275         }
1276
1277         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1278             h->ae_algo->ops->enable_hw_strip_rxvtag) {
1279                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1280                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1281                 else
1282                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1283
1284                 if (ret)
1285                         return ret;
1286         }
1287
1288         netdev->features = features;
1289         return 0;
1290 }
1291
1292 static void hns3_nic_get_stats64(struct net_device *netdev,
1293                                  struct rtnl_link_stats64 *stats)
1294 {
1295         struct hns3_nic_priv *priv = netdev_priv(netdev);
1296         int queue_num = priv->ae_handle->kinfo.num_tqps;
1297         struct hnae3_handle *handle = priv->ae_handle;
1298         struct hns3_enet_ring *ring;
1299         unsigned int start;
1300         unsigned int idx;
1301         u64 tx_bytes = 0;
1302         u64 rx_bytes = 0;
1303         u64 tx_pkts = 0;
1304         u64 rx_pkts = 0;
1305         u64 tx_drop = 0;
1306         u64 rx_drop = 0;
1307
1308         if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1309                 return;
1310
1311         handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1312
1313         for (idx = 0; idx < queue_num; idx++) {
1314                 /* fetch the tx stats */
1315                 ring = priv->ring_data[idx].ring;
1316                 do {
1317                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1318                         tx_bytes += ring->stats.tx_bytes;
1319                         tx_pkts += ring->stats.tx_pkts;
1320                         tx_drop += ring->stats.tx_busy;
1321                         tx_drop += ring->stats.sw_err_cnt;
1322                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1323
1324                 /* fetch the rx stats */
1325                 ring = priv->ring_data[idx + queue_num].ring;
1326                 do {
1327                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1328                         rx_bytes += ring->stats.rx_bytes;
1329                         rx_pkts += ring->stats.rx_pkts;
1330                         rx_drop += ring->stats.non_vld_descs;
1331                         rx_drop += ring->stats.err_pkt_len;
1332                         rx_drop += ring->stats.l2_err;
1333                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1334         }
1335
1336         stats->tx_bytes = tx_bytes;
1337         stats->tx_packets = tx_pkts;
1338         stats->rx_bytes = rx_bytes;
1339         stats->rx_packets = rx_pkts;
1340
1341         stats->rx_errors = netdev->stats.rx_errors;
1342         stats->multicast = netdev->stats.multicast;
1343         stats->rx_length_errors = netdev->stats.rx_length_errors;
1344         stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1345         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1346
1347         stats->tx_errors = netdev->stats.tx_errors;
1348         stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1349         stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1350         stats->collisions = netdev->stats.collisions;
1351         stats->rx_over_errors = netdev->stats.rx_over_errors;
1352         stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1353         stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1354         stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1355         stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1356         stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1357         stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1358         stats->tx_window_errors = netdev->stats.tx_window_errors;
1359         stats->rx_compressed = netdev->stats.rx_compressed;
1360         stats->tx_compressed = netdev->stats.tx_compressed;
1361 }
1362
1363 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1364 {
1365         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1366         struct hnae3_handle *h = hns3_get_handle(netdev);
1367         struct hnae3_knic_private_info *kinfo = &h->kinfo;
1368         u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1369         u8 tc = mqprio_qopt->qopt.num_tc;
1370         u16 mode = mqprio_qopt->mode;
1371         u8 hw = mqprio_qopt->qopt.hw;
1372         bool if_running;
1373         int ret;
1374
1375         if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1376                mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1377                 return -EOPNOTSUPP;
1378
1379         if (tc > HNAE3_MAX_TC)
1380                 return -EINVAL;
1381
1382         if (!netdev)
1383                 return -EINVAL;
1384
1385         if_running = netif_running(netdev);
1386         if (if_running) {
1387                 hns3_nic_net_stop(netdev);
1388                 msleep(100);
1389         }
1390
1391         ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1392                 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1393         if (ret)
1394                 goto out;
1395
1396         ret = hns3_nic_set_real_num_queue(netdev);
1397
1398 out:
1399         if (if_running)
1400                 hns3_nic_net_open(netdev);
1401
1402         return ret;
1403 }
1404
1405 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1406                              void *type_data)
1407 {
1408         if (type != TC_SETUP_QDISC_MQPRIO)
1409                 return -EOPNOTSUPP;
1410
1411         return hns3_setup_tc(dev, type_data);
1412 }
1413
1414 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1415                                 __be16 proto, u16 vid)
1416 {
1417         struct hnae3_handle *h = hns3_get_handle(netdev);
1418         struct hns3_nic_priv *priv = netdev_priv(netdev);
1419         int ret = -EIO;
1420
1421         if (h->ae_algo->ops->set_vlan_filter)
1422                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1423
1424         if (!ret)
1425                 set_bit(vid, priv->active_vlans);
1426
1427         return ret;
1428 }
1429
1430 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1431                                  __be16 proto, u16 vid)
1432 {
1433         struct hnae3_handle *h = hns3_get_handle(netdev);
1434         struct hns3_nic_priv *priv = netdev_priv(netdev);
1435         int ret = -EIO;
1436
1437         if (h->ae_algo->ops->set_vlan_filter)
1438                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1439
1440         if (!ret)
1441                 clear_bit(vid, priv->active_vlans);
1442
1443         return ret;
1444 }
1445
1446 static void hns3_restore_vlan(struct net_device *netdev)
1447 {
1448         struct hns3_nic_priv *priv = netdev_priv(netdev);
1449         u16 vid;
1450         int ret;
1451
1452         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1453                 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1454                 if (ret)
1455                         netdev_warn(netdev, "failed to restore vlan %d filter, ret = %d\n",
1456                                     vid, ret);
1457         }
1458 }
1459
1460 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1461                                 u8 qos, __be16 vlan_proto)
1462 {
1463         struct hnae3_handle *h = hns3_get_handle(netdev);
1464         int ret = -EIO;
1465
1466         if (h->ae_algo->ops->set_vf_vlan_filter)
1467                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1468                                                    qos, vlan_proto);
1469
1470         return ret;
1471 }
1472
1473 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1474 {
1475         struct hnae3_handle *h = hns3_get_handle(netdev);
1476         bool if_running = netif_running(netdev);
1477         int ret;
1478
1479         if (!h->ae_algo->ops->set_mtu)
1480                 return -EOPNOTSUPP;
1481
1482         /* if the netdev is up, bring it down before changing the MTU */
1483         if (if_running) {
1484                 (void)hns3_nic_net_stop(netdev);
1485                 msleep(100);
1486         }
1487
1488         ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1489         if (ret)
1490                 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1491                            ret);
1492         else
1493                 netdev->mtu = new_mtu;
1494
1495         /* if the netdev was running earlier, bring it up again */
1496         if (if_running && hns3_nic_net_open(netdev))
1497                 ret = -EINVAL;
1498
1499         return ret;
1500 }
1501
1502 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1503 {
1504         struct hns3_nic_priv *priv = netdev_priv(ndev);
1505         struct hns3_enet_ring *tx_ring = NULL;
1506         int timeout_queue = 0;
1507         int hw_head, hw_tail;
1508         int i;
1509
1510         /* Find the stopped queue the same way the stack does */
1511         for (i = 0; i < ndev->real_num_tx_queues; i++) {
1512                 struct netdev_queue *q;
1513                 unsigned long trans_start;
1514
1515                 q = netdev_get_tx_queue(ndev, i);
1516                 trans_start = q->trans_start;
1517                 if (netif_xmit_stopped(q) &&
1518                     time_after(jiffies,
1519                                (trans_start + ndev->watchdog_timeo))) {
1520                         timeout_queue = i;
1521                         break;
1522                 }
1523         }
1524
1525         if (i == ndev->real_num_tx_queues) {
1526                 netdev_info(ndev,
1527                             "no netdev TX timeout queue found, timeout count: %llu\n",
1528                             priv->tx_timeout_count);
1529                 return false;
1530         }
1531
1532         tx_ring = priv->ring_data[timeout_queue].ring;
1533
1534         hw_head = readl_relaxed(tx_ring->tqp->io_base +
1535                                 HNS3_RING_TX_RING_HEAD_REG);
1536         hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1537                                 HNS3_RING_TX_RING_TAIL_REG);
1538         netdev_info(ndev,
1539                     "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1540                     priv->tx_timeout_count,
1541                     timeout_queue,
1542                     tx_ring->next_to_use,
1543                     tx_ring->next_to_clean,
1544                     hw_head,
1545                     hw_tail,
1546                     readl(tx_ring->tqp_vector->mask_addr));
1547
1548         return true;
1549 }
1550
1551 static void hns3_nic_net_timeout(struct net_device *ndev)
1552 {
1553         struct hns3_nic_priv *priv = netdev_priv(ndev);
1554         struct hnae3_handle *h = priv->ae_handle;
1555
1556         if (!hns3_get_tx_timeo_queue_info(ndev))
1557                 return;
1558
1559         priv->tx_timeout_count++;
1560
1561         if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
1562                 return;
1563
1564         /* request the reset */
1565         if (h->ae_algo->ops->reset_event)
1566                 h->ae_algo->ops->reset_event(h);
1567 }
1568
1569 static const struct net_device_ops hns3_nic_netdev_ops = {
1570         .ndo_open               = hns3_nic_net_open,
1571         .ndo_stop               = hns3_nic_net_stop,
1572         .ndo_start_xmit         = hns3_nic_net_xmit,
1573         .ndo_tx_timeout         = hns3_nic_net_timeout,
1574         .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1575         .ndo_do_ioctl           = hns3_nic_do_ioctl,
1576         .ndo_change_mtu         = hns3_nic_change_mtu,
1577         .ndo_set_features       = hns3_nic_set_features,
1578         .ndo_get_stats64        = hns3_nic_get_stats64,
1579         .ndo_setup_tc           = hns3_nic_setup_tc,
1580         .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1581         .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1582         .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1583         .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1584 };
1585
1586 static bool hns3_is_phys_func(struct pci_dev *pdev)
1587 {
1588         u32 dev_id = pdev->device;
1589
1590         switch (dev_id) {
1591         case HNAE3_DEV_ID_GE:
1592         case HNAE3_DEV_ID_25GE:
1593         case HNAE3_DEV_ID_25GE_RDMA:
1594         case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1595         case HNAE3_DEV_ID_50GE_RDMA:
1596         case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1597         case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1598                 return true;
1599         case HNAE3_DEV_ID_100G_VF:
1600         case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1601                 return false;
1602         default:
1603                 dev_warn(&pdev->dev, "unrecognized pci device-id %u\n",
1604                          dev_id);
1605         }
1606
1607         return false;
1608 }
1609
1610 static void hns3_disable_sriov(struct pci_dev *pdev)
1611 {
1612         /* If our VFs are assigned we cannot shut down SR-IOV
1613          * without causing issues, so just leave the hardware
1614          * available but disabled
1615          */
1616         if (pci_vfs_assigned(pdev)) {
1617                 dev_warn(&pdev->dev,
1618                          "disabling driver while VFs are assigned\n");
1619                 return;
1620         }
1621
1622         pci_disable_sriov(pdev);
1623 }
1624
1625 static void hns3_get_dev_capability(struct pci_dev *pdev,
1626                                     struct hnae3_ae_dev *ae_dev)
1627 {
1628         if (pdev->revision >= 0x21)
1629                 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1630 }
1631
1632 /* hns3_probe - Device initialization routine
1633  * @pdev: PCI device information struct
1634  * @ent: entry in hns3_pci_tbl
1635  *
1636  * hns3_probe initializes a PF identified by a pci_dev structure.
1637  * The OS initialization, configuring of the PF private structure,
1638  * and a hardware reset occur.
1639  *
1640  * Returns 0 on success, negative on failure
1641  */
1642 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1643 {
1644         struct hnae3_ae_dev *ae_dev;
1645         int ret;
1646
1647         ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1648                               GFP_KERNEL);
1649         if (!ae_dev) {
1650                 ret = -ENOMEM;
1651                 return ret;
1652         }
1653
1654         ae_dev->pdev = pdev;
1655         ae_dev->flag = ent->driver_data;
1656         ae_dev->dev_type = HNAE3_DEV_KNIC;
1657         ae_dev->reset_type = HNAE3_NONE_RESET;
1658         hns3_get_dev_capability(pdev, ae_dev);
1659         pci_set_drvdata(pdev, ae_dev);
1660
1661         hnae3_register_ae_dev(ae_dev);
1662
1663         return 0;
1664 }
1665
1666 /* hns3_remove - Device removal routine
1667  * @pdev: PCI device information struct
1668  */
1669 static void hns3_remove(struct pci_dev *pdev)
1670 {
1671         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1672
1673         if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1674                 hns3_disable_sriov(pdev);
1675
1676         hnae3_unregister_ae_dev(ae_dev);
1677 }
1678
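/* SR-IOV configuration is normally triggered from sysfs through the PCI
 * core, e.g. (path shown for illustration only):
 *
 *   echo 4 > /sys/bus/pci/devices/<domain:bus:dev.fn>/sriov_numvfs
 *
 * which ends up in the .sriov_configure callback implemented below.
 */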
1679 /**
1680  * hns3_pci_sriov_configure
1681  * @pdev: pointer to a pci_dev structure
1682  * @num_vfs: number of VFs to allocate
1683  *
1684  * Enable or change the number of VFs. Called when the user updates the number
1685  * of VFs in sysfs.
1686  **/
1687 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1688 {
1689         int ret;
1690
1691         if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1692                 dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
1693                 return -EINVAL;
1694         }
1695
1696         if (num_vfs) {
1697                 ret = pci_enable_sriov(pdev, num_vfs);
1698                 if (ret)
1699                         dev_err(&pdev->dev, "SR-IOV enable failed %d\n", ret);
1700                 else
1701                         return num_vfs;
1702         } else if (!pci_vfs_assigned(pdev)) {
1703                 pci_disable_sriov(pdev);
1704         } else {
1705                 dev_warn(&pdev->dev,
1706                          "Unable to free VFs because some are assigned to VMs.\n");
1707         }
1708
1709         return 0;
1710 }
1711
1712 static void hns3_shutdown(struct pci_dev *pdev)
1713 {
1714         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1715
1716         hnae3_unregister_ae_dev(ae_dev);
1717         devm_kfree(&pdev->dev, ae_dev);
1718         pci_set_drvdata(pdev, NULL);
1719
1720         if (system_state == SYSTEM_POWER_OFF)
1721                 pci_set_power_state(pdev, PCI_D3hot);
1722 }
1723
1724 static struct pci_driver hns3_driver = {
1725         .name     = hns3_driver_name,
1726         .id_table = hns3_pci_tbl,
1727         .probe    = hns3_probe,
1728         .remove   = hns3_remove,
1729         .shutdown = hns3_shutdown,
1730         .sriov_configure = hns3_pci_sriov_configure,
1731 };
1732
1733 /* set the default features for the hns3 netdev */
1734 static void hns3_set_default_feature(struct net_device *netdev)
1735 {
1736         struct hnae3_handle *h = hns3_get_handle(netdev);
1737         struct pci_dev *pdev = h->pdev;
1738
1739         netdev->priv_flags |= IFF_UNICAST_FLT;
1740
1741         netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1742                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1743                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1744                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1745                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1746
1747         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1748
1749         netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1750
1751         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1752                 NETIF_F_HW_VLAN_CTAG_FILTER |
1753                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1754                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1755                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1756                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1757                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1758
1759         netdev->vlan_features |=
1760                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1761                 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1762                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1763                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1764                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1765
1766         netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1767                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1768                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1769                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1770                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1771                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1772
1773         if (pdev->revision != 0x20)
1774                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1775 }
1776
1777 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1778                              struct hns3_desc_cb *cb)
1779 {
1780         unsigned int order = hnae3_page_order(ring);
1781         struct page *p;
1782
1783         p = dev_alloc_pages(order);
1784         if (!p)
1785                 return -ENOMEM;
1786
1787         cb->priv = p;
1788         cb->page_offset = 0;
1789         cb->reuse_flag = 0;
1790         cb->buf  = page_address(p);
1791         cb->length = hnae3_page_size(ring);
1792         cb->type = DESC_TYPE_PAGE;
1793
1794         return 0;
1795 }
1796
1797 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1798                              struct hns3_desc_cb *cb)
1799 {
1800         if (cb->type == DESC_TYPE_SKB)
1801                 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1802         else if (!HNAE3_IS_TX_RING(ring))
1803                 put_page((struct page *)cb->priv);
1804         memset(cb, 0, sizeof(*cb));
1805 }
1806
1807 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1808 {
1809         cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1810                                cb->length, ring_to_dma_dir(ring));
1811
1812         if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
1813                 return -EIO;
1814
1815         return 0;
1816 }
1817
1818 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1819                               struct hns3_desc_cb *cb)
1820 {
1821         if (cb->type == DESC_TYPE_SKB)
1822                 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1823                                  ring_to_dma_dir(ring));
1824         else
1825                 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1826                                ring_to_dma_dir(ring));
1827 }
1828
1829 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1830 {
1831         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1832         ring->desc[i].addr = 0;
1833 }
1834
1835 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1836 {
1837         struct hns3_desc_cb *cb = &ring->desc_cb[i];
1838
1839         if (!ring->desc_cb[i].dma)
1840                 return;
1841
1842         hns3_buffer_detach(ring, i);
1843         hns3_free_buffer(ring, cb);
1844 }
1845
1846 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1847 {
1848         int i;
1849
1850         for (i = 0; i < ring->desc_num; i++)
1851                 hns3_free_buffer_detach(ring, i);
1852 }
1853
1854 /* free the descriptors along with their attached buffers */
1855 static void hns3_free_desc(struct hns3_enet_ring *ring)
1856 {
1857         int size = ring->desc_num * sizeof(ring->desc[0]);
1858
1859         hns3_free_buffers(ring);
1860
1861         if (ring->desc) {
1862                 dma_free_coherent(ring_to_dev(ring), size,
1863                                   ring->desc, ring->desc_dma_addr);
1864                 ring->desc = NULL;
1865         }
1866 }
1867
1868 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1869 {
1870         int size = ring->desc_num * sizeof(ring->desc[0]);
1871
1872         ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
1873                                          &ring->desc_dma_addr,
1874                                          GFP_KERNEL);
1875         if (!ring->desc)
1876                 return -ENOMEM;
1877
1878         return 0;
1879 }
1880
1881 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1882                                    struct hns3_desc_cb *cb)
1883 {
1884         int ret;
1885
1886         ret = hns3_alloc_buffer(ring, cb);
1887         if (ret)
1888                 goto out;
1889
1890         ret = hns3_map_buffer(ring, cb);
1891         if (ret)
1892                 goto out_with_buf;
1893
1894         return 0;
1895
1896 out_with_buf:
1897         hns3_free_buffer(ring, cb);
1898 out:
1899         return ret;
1900 }
1901
1902 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1903 {
1904         int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1905
1906         if (ret)
1907                 return ret;
1908
1909         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1910
1911         return 0;
1912 }
1913
1914 /* Allocate buffers for the raw packets and map them for DMA */
1915 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1916 {
1917         int i, j, ret;
1918
1919         for (i = 0; i < ring->desc_num; i++) {
1920                 ret = hns3_alloc_buffer_attach(ring, i);
1921                 if (ret)
1922                         goto out_buffer_fail;
1923         }
1924
1925         return 0;
1926
1927 out_buffer_fail:
1928         for (j = i - 1; j >= 0; j--)
1929                 hns3_free_buffer_detach(ring, j);
1930         return ret;
1931 }
1932
1933 /* detach an in-use buffer and replace it with a reserved one */
1934 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1935                                 struct hns3_desc_cb *res_cb)
1936 {
1937         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1938         ring->desc_cb[i] = *res_cb;
1939         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1940         ring->desc[i].rx.bd_base_info = 0;
1941 }
1942
1943 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1944 {
1945         ring->desc_cb[i].reuse_flag = 0;
1946         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1947                 + ring->desc_cb[i].page_offset);
1948         ring->desc[i].rx.bd_base_info = 0;
1949 }
1950
1951 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1952                                       int *pkts)
1953 {
1954         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1955
1956         (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1957         (*bytes) += desc_cb->length;
1958         /* desc_cb is cleaned by hns3_free_buffer_detach() below */
1959         hns3_free_buffer_detach(ring, ring->next_to_clean);
1960
1961         ring_ptr_move_fw(ring, next_to_clean);
1962 }
1963
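/* Sanity-check the head index reported by hardware against the software
 * ring pointers. Worked example: with next_to_clean = 2 and next_to_use = 6
 * the valid range is 2 < head <= 6; once the pointers wrap
 * (next_to_use < next_to_clean) the valid range wraps as well.
 */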
1964 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1965 {
1966         int u = ring->next_to_use;
1967         int c = ring->next_to_clean;
1968
1969         if (unlikely(h > ring->desc_num))
1970                 return 0;
1971
1972         return u > c ? (h > c && h <= u) : (h > c || h <= u);
1973 }
1974
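/* Reclaim TX descriptors that hardware has finished with (up to the head
 * pointer read from HNS3_RING_TX_RING_HEAD_REG), report the completed
 * bytes/packets to BQL via netdev_tx_completed_queue() (the transmit path
 * is expected to have called netdev_tx_sent_queue(); not shown in this
 * excerpt), and wake the queue once enough descriptors are free again.
 */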
1975 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
1976 {
1977         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1978         struct hns3_nic_priv *priv = netdev_priv(netdev);
1979         struct netdev_queue *dev_queue;
1980         int bytes, pkts;
1981         int head;
1982
1983         head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1984         rmb(); /* Make sure head is read before any other data is touched */
1985
1986         if (is_ring_empty(ring) || head == ring->next_to_clean)
1987                 return; /* no data to poll */
1988
1989         if (unlikely(!is_valid_clean_head(ring, head))) {
1990                 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1991                            ring->next_to_use, ring->next_to_clean);
1992
1993                 u64_stats_update_begin(&ring->syncp);
1994                 ring->stats.io_err_cnt++;
1995                 u64_stats_update_end(&ring->syncp);
1996                 return;
1997         }
1998
1999         bytes = 0;
2000         pkts = 0;
2001         while (head != ring->next_to_clean) {
2002                 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2003                 /* Issue prefetch for next Tx descriptor */
2004                 prefetch(&ring->desc_cb[ring->next_to_clean]);
2005         }
2006
2007         ring->tqp_vector->tx_group.total_bytes += bytes;
2008         ring->tqp_vector->tx_group.total_packets += pkts;
2009
2010         u64_stats_update_begin(&ring->syncp);
2011         ring->stats.tx_bytes += bytes;
2012         ring->stats.tx_pkts += pkts;
2013         u64_stats_update_end(&ring->syncp);
2014
2015         dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2016         netdev_tx_completed_queue(dev_queue, pkts, bytes);
2017
2018         if (unlikely(pkts && netif_carrier_ok(netdev) &&
2019                      (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2020                 /* Make sure that anybody stopping the queue after this
2021                  * sees the new next_to_clean.
2022                  */
2023                 smp_mb();
2024                 if (netif_tx_queue_stopped(dev_queue) &&
2025                     !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2026                         netif_tx_wake_queue(dev_queue);
2027                         ring->stats.restart_queue++;
2028                 }
2029         }
2030 }
2031
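/* Number of descriptors the RX refill path may (re)allocate. Worked
 * example: with desc_num = 8, next_to_clean = 2 and next_to_use = 6 the
 * ring still owns 4 buffers, so 8 + 2 - 6 = 4 descriptors are unused.
 */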
2032 static int hns3_desc_unused(struct hns3_enet_ring *ring)
2033 {
2034         int ntc = ring->next_to_clean;
2035         int ntu = ring->next_to_use;
2036
2037         return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2038 }
2039
2040 static void
2041 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2042 {
2043         struct hns3_desc_cb *desc_cb;
2044         struct hns3_desc_cb res_cbs;
2045         int i, ret;
2046
2047         for (i = 0; i < cleaned_count; i++) {
2048                 desc_cb = &ring->desc_cb[ring->next_to_use];
2049                 if (desc_cb->reuse_flag) {
2050                         u64_stats_update_begin(&ring->syncp);
2051                         ring->stats.reuse_pg_cnt++;
2052                         u64_stats_update_end(&ring->syncp);
2053
2054                         hns3_reuse_buffer(ring, ring->next_to_use);
2055                 } else {
2056                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
2057                         if (ret) {
2058                                 u64_stats_update_begin(&ring->syncp);
2059                                 ring->stats.sw_err_cnt++;
2060                                 u64_stats_update_end(&ring->syncp);
2061
2062                                 netdev_err(ring->tqp->handle->kinfo.netdev,
2063                                            "hnae reserve buffer map failed.\n");
2064                                 break;
2065                         }
2066                         hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2067                 }
2068
2069                 ring_ptr_move_fw(ring, next_to_use);
2070         }
2071
2072         wmb(); /* Make buffer writes visible before updating the head register */
2073         writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2074 }
2075
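/* Attach the received buffer to the skb as a page fragment and decide
 * whether the page can be recycled. With 4K pages and 2048-byte buffers
 * ("twobufs") the page is split in two and page_offset simply flips
 * between 0 and 2048; for larger pages the offset advances by one buffer
 * at a time until the last buffer in the page has been handed out.
 */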
2076 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2077                                 struct hns3_enet_ring *ring, int pull_len,
2078                                 struct hns3_desc_cb *desc_cb)
2079 {
2080         struct hns3_desc *desc;
2081         u32 truesize;
2082         int size;
2083         int last_offset;
2084         bool twobufs;
2085
2086         twobufs = ((PAGE_SIZE < 8192) &&
2087                 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2088
2089         desc = &ring->desc[ring->next_to_clean];
2090         size = le16_to_cpu(desc->rx.size);
2091
2092         truesize = hnae3_buf_size(ring);
2093
2094         if (!twobufs)
2095                 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2096
2097         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2098                         size - pull_len, truesize);
2099
2100         /* Avoid reusing pages from a remote NUMA node; default to no reuse */
2101         if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2102                 return;
2103
2104         if (twobufs) {
2105                 /* If we are only owner of page we can reuse it */
2106                 if (likely(page_count(desc_cb->priv) == 1)) {
2107                         /* Flip page offset to other buffer */
2108                         desc_cb->page_offset ^= truesize;
2109
2110                         desc_cb->reuse_flag = 1;
2111                         /* bump ref count on the page before it is handed out */
2112                         get_page(desc_cb->priv);
2113                 }
2114                 return;
2115         }
2116
2117         /* Move the offset to the next buffer in the page */
2118         desc_cb->page_offset += truesize;
2119
2120         if (desc_cb->page_offset <= last_offset) {
2121                 desc_cb->reuse_flag = 1;
2122                 /* Bump ref count on the page before it is handed out */
2123                 get_page(desc_cb->priv);
2124         }
2125 }
2126
2127 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2128                              struct hns3_desc *desc)
2129 {
2130         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2131         int l3_type, l4_type;
2132         u32 bd_base_info;
2133         int ol4_type;
2134         u32 l234info;
2135
2136         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2137         l234info = le32_to_cpu(desc->rx.l234_info);
2138
2139         skb->ip_summed = CHECKSUM_NONE;
2140
2141         skb_checksum_none_assert(skb);
2142
2143         if (!(netdev->features & NETIF_F_RXCSUM))
2144                 return;
2145
2146         /* check if hardware has done checksum */
2147         if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2148                 return;
2149
2150         if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2151                      hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2152                      hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2153                      hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2154                 u64_stats_update_begin(&ring->syncp);
2155                 ring->stats.l3l4_csum_err++;
2156                 u64_stats_update_end(&ring->syncp);
2157
2158                 return;
2159         }
2160
2161         l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2162                                   HNS3_RXD_L3ID_S);
2163         l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2164                                   HNS3_RXD_L4ID_S);
2165
2166         ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2167                                    HNS3_RXD_OL4ID_S);
2168         switch (ol4_type) {
2169         case HNS3_OL4_TYPE_MAC_IN_UDP:
2170         case HNS3_OL4_TYPE_NVGRE:
2171                 skb->csum_level = 1;
2172                 /* fall through */
2173         case HNS3_OL4_TYPE_NO_TUN:
2174                 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2175                 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2176                      l3_type == HNS3_L3_TYPE_IPV6) &&
2177                     (l4_type == HNS3_L4_TYPE_UDP ||
2178                      l4_type == HNS3_L4_TYPE_TCP ||
2179                      l4_type == HNS3_L4_TYPE_SCTP))
2180                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2181                 break;
2182         default:
2183                 break;
2184         }
2185 }
2186
2187 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2188 {
2189         napi_gro_receive(&ring->tqp_vector->napi, skb);
2190 }
2191
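/* Pick the VLAN tag that hardware stripped from the frame. Revision 0x20
 * hardware does not report which tag was stripped, so the driver checks
 * ot_vlan_tag first and falls back to vlan_tag; newer revisions encode the
 * stripped-tag position in the HNS3_RXD_STRP_TAGP field.
 */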
2192 static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2193                                struct hns3_desc *desc, u32 l234info)
2194 {
2195         struct pci_dev *pdev = ring->tqp->handle->pdev;
2196         u16 vlan_tag;
2197
2198         if (pdev->revision == 0x20) {
2199                 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2200                 if (!(vlan_tag & VLAN_VID_MASK))
2201                         vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2202
2203                 return vlan_tag;
2204         }
2205
2206 #define HNS3_STRP_OUTER_VLAN    0x1
2207 #define HNS3_STRP_INNER_VLAN    0x2
2208
2209         switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2210                                 HNS3_RXD_STRP_TAGP_S)) {
2211         case HNS3_STRP_OUTER_VLAN:
2212                 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2213                 break;
2214         case HNS3_STRP_INNER_VLAN:
2215                 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2216                 break;
2217         default:
2218                 vlan_tag = 0;
2219                 break;
2220         }
2221
2222         return vlan_tag;
2223 }
2224
2225 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2226                              struct sk_buff **out_skb, int *out_bnum)
2227 {
2228         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2229         struct hns3_desc_cb *desc_cb;
2230         struct hns3_desc *desc;
2231         struct sk_buff *skb;
2232         unsigned char *va;
2233         u32 bd_base_info;
2234         int pull_len;
2235         u32 l234info;
2236         int length;
2237         int bnum;
2238
2239         desc = &ring->desc[ring->next_to_clean];
2240         desc_cb = &ring->desc_cb[ring->next_to_clean];
2241
2242         prefetch(desc);
2243
2244         length = le16_to_cpu(desc->rx.size);
2245         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2246
2247         /* Check valid BD */
2248         if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
2249                 return -EFAULT;
2250
2251         va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2252
2253         /* Prefetch the first cache line of the first page.
2254          * The idea is to bring the first bytes of the packet header into
2255          * cache. With a 64B L1 cache line we prefetch twice to cover 128B;
2256          * CPUs with 128B L1 cache lines only need a single prefetch, and
2257          * the second one below is compiled out by the L1_CACHE_BYTES
2258          * check.
2259          */
2260         prefetch(va);
2261 #if L1_CACHE_BYTES < 128
2262         prefetch(va + L1_CACHE_BYTES);
2263 #endif
2264
2265         skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2266                                         HNS3_RX_HEAD_SIZE);
2267         if (unlikely(!skb)) {
2268                 netdev_err(netdev, "alloc rx skb fail\n");
2269
2270                 u64_stats_update_begin(&ring->syncp);
2271                 ring->stats.sw_err_cnt++;
2272                 u64_stats_update_end(&ring->syncp);
2273
2274                 return -ENOMEM;
2275         }
2276
2277         prefetchw(skb->data);
2278
2279         bnum = 1;
2280         if (length <= HNS3_RX_HEAD_SIZE) {
2281                 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2282
2283                 /* We can reuse buffer as-is, just make sure it is local */
2284                 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2285                         desc_cb->reuse_flag = 1;
2286                 else /* This page cannot be reused so discard it */
2287                         put_page(desc_cb->priv);
2288
2289                 ring_ptr_move_fw(ring, next_to_clean);
2290         } else {
2291                 u64_stats_update_begin(&ring->syncp);
2292                 ring->stats.seg_pkt_cnt++;
2293                 u64_stats_update_end(&ring->syncp);
2294
2295                 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2296
2297                 memcpy(__skb_put(skb, pull_len), va,
2298                        ALIGN(pull_len, sizeof(long)));
2299
2300                 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2301                 ring_ptr_move_fw(ring, next_to_clean);
2302
2303                 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2304                         desc = &ring->desc[ring->next_to_clean];
2305                         desc_cb = &ring->desc_cb[ring->next_to_clean];
2306                         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2307                         hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2308                         ring_ptr_move_fw(ring, next_to_clean);
2309                         bnum++;
2310                 }
2311         }
2312
2313         *out_bnum = bnum;
2314
2315         l234info = le32_to_cpu(desc->rx.l234_info);
2316
2317         /* Based on hw strategy, the tag offloaded will be stored at
2318          * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2319          * in one layer tag case.
2320          */
2321         if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2322                 u16 vlan_tag;
2323
2324                 vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
2325                 if (vlan_tag & VLAN_VID_MASK)
2326                         __vlan_hwaccel_put_tag(skb,
2327                                                htons(ETH_P_8021Q),
2328                                                vlan_tag);
2329         }
2330
2331         if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2332                 u64_stats_update_begin(&ring->syncp);
2333                 ring->stats.non_vld_descs++;
2334                 u64_stats_update_end(&ring->syncp);
2335
2336                 dev_kfree_skb_any(skb);
2337                 return -EINVAL;
2338         }
2339
2340         if (unlikely((!desc->rx.pkt_len) ||
2341                      hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2342                 u64_stats_update_begin(&ring->syncp);
2343                 ring->stats.err_pkt_len++;
2344                 u64_stats_update_end(&ring->syncp);
2345
2346                 dev_kfree_skb_any(skb);
2347                 return -EFAULT;
2348         }
2349
2350         if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
2351                 u64_stats_update_begin(&ring->syncp);
2352                 ring->stats.l2_err++;
2353                 u64_stats_update_end(&ring->syncp);
2354
2355                 dev_kfree_skb_any(skb);
2356                 return -EFAULT;
2357         }
2358
2359         u64_stats_update_begin(&ring->syncp);
2360         ring->stats.rx_pkts++;
2361         ring->stats.rx_bytes += skb->len;
2362         u64_stats_update_end(&ring->syncp);
2363
2364         ring->tqp_vector->rx_group.total_bytes += skb->len;
2365
2366         hns3_rx_checksum(ring, skb, desc);
2367         return 0;
2368 }
2369
2370 int hns3_clean_rx_ring(
2371                 struct hns3_enet_ring *ring, int budget,
2372                 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2373 {
2374 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2375         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2376         int recv_pkts, recv_bds, clean_count, err;
2377         int unused_count = hns3_desc_unused(ring);
2378         struct sk_buff *skb = NULL;
2379         int num, bnum = 0;
2380
2381         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2382         rmb(); /* Make sure num is read before any other descriptor data is touched */
2383
2384         recv_pkts = 0, recv_bds = 0, clean_count = 0;
2385         num -= unused_count;
2386
2387         while (recv_pkts < budget && recv_bds < num) {
2388                 /* Reuse or realloc buffers */
2389                 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2390                         hns3_nic_alloc_rx_buffers(ring,
2391                                                   clean_count + unused_count);
2392                         clean_count = 0;
2393                         unused_count = hns3_desc_unused(ring);
2394                 }
2395
2396                 /* Poll one pkt */
2397                 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2398                 if (unlikely(!skb)) /* fatal error, no skb was built for this BD */
2399                         goto out;
2400
2401                 recv_bds += bnum;
2402                 clean_count += bnum;
2403                 if (unlikely(err)) {  /* skip the erroneous packet */
2404                         recv_pkts++;
2405                         continue;
2406                 }
2407
2408                 /* Hand the packet up to the network stack */
2409                 skb->protocol = eth_type_trans(skb, netdev);
2410                 rx_fn(ring, skb);
2411
2412                 recv_pkts++;
2413         }
2414
2415 out:
2416         /* Refill the ring with the buffers consumed above */
2417         if (clean_count + unused_count > 0)
2418                 hns3_nic_alloc_rx_buffers(ring,
2419                                           clean_count + unused_count);
2420
2421         return recv_pkts;
2422 }
2423
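/* Adapt the interrupt GL (coalescing) value to the observed traffic rate.
 * Worked example using the thresholds below: a group doing ~15,000 bytes/ms
 * (~15 MB/s) while in HNS3_FLOW_LOW moves to HNS3_FLOW_MID and gets
 * HNS3_INT_GL_20K; an RX group exceeding 40 packets/ms (~40,000 pps) jumps
 * to HNS3_FLOW_ULTRA and gets HNS3_INT_GL_8K.
 */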
2424 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2425 {
2426         struct hns3_enet_tqp_vector *tqp_vector =
2427                                         ring_group->ring->tqp_vector;
2428         enum hns3_flow_level_range new_flow_level;
2429         int packets_per_msecs;
2430         int bytes_per_msecs;
2431         u32 time_passed_ms;
2432         u16 new_int_gl;
2433
2434         if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
2435                 return false;
2436
2437         if (ring_group->total_packets == 0) {
2438                 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2439                 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2440                 return true;
2441         }
2442
2443         /* Simple throttle rate management
2444          *  0-10 MB/s    low    (50000 ints/s)
2445          * 10-20 MB/s    mid    (20000 ints/s)
2446          *  > 20 MB/s    high   (18000 ints/s)
2447          * > 40000 pps   ultra  ( 8000 ints/s)
2448          */
2449         new_flow_level = ring_group->coal.flow_level;
2450         new_int_gl = ring_group->coal.int_gl;
2451         time_passed_ms =
2452                 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2453
2454         if (!time_passed_ms)
2455                 return false;
2456
2457         do_div(ring_group->total_packets, time_passed_ms);
2458         packets_per_msecs = ring_group->total_packets;
2459
2460         do_div(ring_group->total_bytes, time_passed_ms);
2461         bytes_per_msecs = ring_group->total_bytes;
2462
2463 #define HNS3_RX_LOW_BYTE_RATE 10000
2464 #define HNS3_RX_MID_BYTE_RATE 20000
2465
2466         switch (new_flow_level) {
2467         case HNS3_FLOW_LOW:
2468                 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2469                         new_flow_level = HNS3_FLOW_MID;
2470                 break;
2471         case HNS3_FLOW_MID:
2472                 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2473                         new_flow_level = HNS3_FLOW_HIGH;
2474                 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2475                         new_flow_level = HNS3_FLOW_LOW;
2476                 break;
2477         case HNS3_FLOW_HIGH:
2478         case HNS3_FLOW_ULTRA:
2479         default:
2480                 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2481                         new_flow_level = HNS3_FLOW_MID;
2482                 break;
2483         }
2484
2485 #define HNS3_RX_ULTRA_PACKET_RATE 40
2486
2487         if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2488             &tqp_vector->rx_group == ring_group)
2489                 new_flow_level = HNS3_FLOW_ULTRA;
2490
2491         switch (new_flow_level) {
2492         case HNS3_FLOW_LOW:
2493                 new_int_gl = HNS3_INT_GL_50K;
2494                 break;
2495         case HNS3_FLOW_MID:
2496                 new_int_gl = HNS3_INT_GL_20K;
2497                 break;
2498         case HNS3_FLOW_HIGH:
2499                 new_int_gl = HNS3_INT_GL_18K;
2500                 break;
2501         case HNS3_FLOW_ULTRA:
2502                 new_int_gl = HNS3_INT_GL_8K;
2503                 break;
2504         default:
2505                 break;
2506         }
2507
2508         ring_group->total_bytes = 0;
2509         ring_group->total_packets = 0;
2510         ring_group->coal.flow_level = new_flow_level;
2511         if (new_int_gl != ring_group->coal.int_gl) {
2512                 ring_group->coal.int_gl = new_int_gl;
2513                 return true;
2514         }
2515         return false;
2516 }
2517
2518 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2519 {
2520         struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2521         struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2522         bool rx_update, tx_update;
2523
2524         if (tqp_vector->int_adapt_down > 0) {
2525                 tqp_vector->int_adapt_down--;
2526                 return;
2527         }
2528
2529         if (rx_group->coal.gl_adapt_enable) {
2530                 rx_update = hns3_get_new_int_gl(rx_group);
2531                 if (rx_update)
2532                         hns3_set_vector_coalesce_rx_gl(tqp_vector,
2533                                                        rx_group->coal.int_gl);
2534         }
2535
2536         if (tx_group->coal.gl_adapt_enable) {
2537                 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2538                 if (tx_update)
2539                         hns3_set_vector_coalesce_tx_gl(tqp_vector,
2540                                                        tx_group->coal.int_gl);
2541         }
2542
2543         tqp_vector->last_jiffies = jiffies;
2544         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
2545 }
2546
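/* NAPI poll handler shared by the TX and RX rings on a vector. Per the NAPI
 * contract, returning the full budget keeps the vector in polling mode;
 * only when every RX ring is cleaned within its share of the budget does
 * the handler call napi_complete() and re-enable the vector interrupt via
 * hns3_mask_vector_irq().
 */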
2547 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2548 {
2549         struct hns3_enet_ring *ring;
2550         int rx_pkt_total = 0;
2551
2552         struct hns3_enet_tqp_vector *tqp_vector =
2553                 container_of(napi, struct hns3_enet_tqp_vector, napi);
2554         bool clean_complete = true;
2555         int rx_budget;
2556
2557         /* Since the actual Tx work is minimal, we can give the Tx a larger
2558          * budget and be more aggressive about cleaning up the Tx descriptors.
2559          */
2560         hns3_for_each_ring(ring, tqp_vector->tx_group)
2561                 hns3_clean_tx_ring(ring);
2562
2563         /* make sure the rx ring budget is at least 1 */
2564         rx_budget = max(budget / tqp_vector->num_tqps, 1);
2565
2566         hns3_for_each_ring(ring, tqp_vector->rx_group) {
2567                 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2568                                                     hns3_rx_skb);
2569
2570                 if (rx_cleaned >= rx_budget)
2571                         clean_complete = false;
2572
2573                 rx_pkt_total += rx_cleaned;
2574         }
2575
2576         tqp_vector->rx_group.total_packets += rx_pkt_total;
2577
2578         if (!clean_complete)
2579                 return budget;
2580
2581         napi_complete(napi);
2582         hns3_update_new_int_gl(tqp_vector);
2583         hns3_mask_vector_irq(tqp_vector, 1);
2584
2585         return rx_pkt_total;
2586 }
2587
2588 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2589                                       struct hnae3_ring_chain_node *head)
2590 {
2591         struct pci_dev *pdev = tqp_vector->handle->pdev;
2592         struct hnae3_ring_chain_node *cur_chain = head;
2593         struct hnae3_ring_chain_node *chain;
2594         struct hns3_enet_ring *tx_ring;
2595         struct hns3_enet_ring *rx_ring;
2596
2597         tx_ring = tqp_vector->tx_group.ring;
2598         if (tx_ring) {
2599                 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2600                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2601                               HNAE3_RING_TYPE_TX);
2602                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2603                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2604
2605                 cur_chain->next = NULL;
2606
2607                 while (tx_ring->next) {
2608                         tx_ring = tx_ring->next;
2609
2610                         chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2611                                              GFP_KERNEL);
2612                         if (!chain)
2613                                 return -ENOMEM;
2614
2615                         cur_chain->next = chain;
2616                         chain->tqp_index = tx_ring->tqp->tqp_index;
2617                         hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2618                                       HNAE3_RING_TYPE_TX);
2619                         hnae3_set_field(chain->int_gl_idx,
2620                                         HNAE3_RING_GL_IDX_M,
2621                                         HNAE3_RING_GL_IDX_S,
2622                                         HNAE3_RING_GL_TX);
2623
2624                         cur_chain = chain;
2625                 }
2626         }
2627
2628         rx_ring = tqp_vector->rx_group.ring;
2629         if (!tx_ring && rx_ring) {
2630                 cur_chain->next = NULL;
2631                 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2632                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2633                               HNAE3_RING_TYPE_RX);
2634                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2635                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2636
2637                 rx_ring = rx_ring->next;
2638         }
2639
2640         while (rx_ring) {
2641                 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2642                 if (!chain)
2643                         return -ENOMEM;
2644
2645                 cur_chain->next = chain;
2646                 chain->tqp_index = rx_ring->tqp->tqp_index;
2647                 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2648                               HNAE3_RING_TYPE_RX);
2649                 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2650                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2651
2652                 cur_chain = chain;
2653
2654                 rx_ring = rx_ring->next;
2655         }
2656
2657         return 0;
2658 }
2659
2660 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2661                                         struct hnae3_ring_chain_node *head)
2662 {
2663         struct pci_dev *pdev = tqp_vector->handle->pdev;
2664         struct hnae3_ring_chain_node *chain_tmp, *chain;
2665
2666         chain = head->next;
2667
2668         while (chain) {
2669                 chain_tmp = chain->next;
2670                 devm_kfree(&pdev->dev, chain);
2671                 chain = chain_tmp;
2672         }
2673 }
2674
2675 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2676                                    struct hns3_enet_ring *ring)
2677 {
2678         ring->next = group->ring;
2679         group->ring = ring;
2680
2681         group->count++;
2682 }
2683
2684 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
2685 {
2686         struct pci_dev *pdev = priv->ae_handle->pdev;
2687         struct hns3_enet_tqp_vector *tqp_vector;
2688         int num_vectors = priv->vector_num;
2689         int numa_node;
2690         int vector_i;
2691
2692         numa_node = dev_to_node(&pdev->dev);
2693
2694         for (vector_i = 0; vector_i < num_vectors; vector_i++) {
2695                 tqp_vector = &priv->tqp_vector[vector_i];
2696                 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
2697                                 &tqp_vector->affinity_mask);
2698         }
2699 }
2700
2701 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2702 {
2703         struct hnae3_ring_chain_node vector_ring_chain;
2704         struct hnae3_handle *h = priv->ae_handle;
2705         struct hns3_enet_tqp_vector *tqp_vector;
2706         int ret = 0;
2707         u16 i;
2708
2709         hns3_nic_set_cpumask(priv);
2710
2711         for (i = 0; i < priv->vector_num; i++) {
2712                 tqp_vector = &priv->tqp_vector[i];
2713                 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2714                 tqp_vector->num_tqps = 0;
2715         }
2716
2717         for (i = 0; i < h->kinfo.num_tqps; i++) {
2718                 u16 vector_i = i % priv->vector_num;
2719                 u16 tqp_num = h->kinfo.num_tqps;
2720
2721                 tqp_vector = &priv->tqp_vector[vector_i];
2722
2723                 hns3_add_ring_to_group(&tqp_vector->tx_group,
2724                                        priv->ring_data[i].ring);
2725
2726                 hns3_add_ring_to_group(&tqp_vector->rx_group,
2727                                        priv->ring_data[i + tqp_num].ring);
2728
2729                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2730                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2731                 tqp_vector->num_tqps++;
2732         }
2733
2734         for (i = 0; i < priv->vector_num; i++) {
2735                 tqp_vector = &priv->tqp_vector[i];
2736
2737                 tqp_vector->rx_group.total_bytes = 0;
2738                 tqp_vector->rx_group.total_packets = 0;
2739                 tqp_vector->tx_group.total_bytes = 0;
2740                 tqp_vector->tx_group.total_packets = 0;
2741                 tqp_vector->handle = h;
2742
2743                 ret = hns3_get_vector_ring_chain(tqp_vector,
2744                                                  &vector_ring_chain);
2745                 if (ret)
2746                         return ret;
2747
2748                 ret = h->ae_algo->ops->map_ring_to_vector(h,
2749                         tqp_vector->vector_irq, &vector_ring_chain);
2750
2751                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2752
2753                 if (ret)
2754                         return ret;
2755
2756                 netif_napi_add(priv->netdev, &tqp_vector->napi,
2757                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2758         }
2759
2760         return 0;
2761 }
2762
2763 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2764 {
2765         struct hnae3_handle *h = priv->ae_handle;
2766         struct hns3_enet_tqp_vector *tqp_vector;
2767         struct hnae3_vector_info *vector;
2768         struct pci_dev *pdev = h->pdev;
2769         u16 tqp_num = h->kinfo.num_tqps;
2770         u16 vector_num;
2771         int ret = 0;
2772         u16 i;
2773
2774         /* Ideally the RSS size, online CPU count and vector_num match */
2775         /* TODO: consider 2P/4P (multi-socket) systems later */
2776         vector_num = min_t(u16, num_online_cpus(), tqp_num);
2777         vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2778                               GFP_KERNEL);
2779         if (!vector)
2780                 return -ENOMEM;
2781
2782         vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2783
2784         priv->vector_num = vector_num;
2785         priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2786                 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2787                              GFP_KERNEL);
2788         if (!priv->tqp_vector) {
2789                 ret = -ENOMEM;
2790                 goto out;
2791         }
2792
2793         for (i = 0; i < priv->vector_num; i++) {
2794                 tqp_vector = &priv->tqp_vector[i];
2795                 tqp_vector->idx = i;
2796                 tqp_vector->mask_addr = vector[i].io_addr;
2797                 tqp_vector->vector_irq = vector[i].vector;
2798                 hns3_vector_gl_rl_init(tqp_vector, priv);
2799         }
2800
2801 out:
2802         devm_kfree(&pdev->dev, vector);
2803         return ret;
2804 }
2805
2806 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2807 {
2808         group->ring = NULL;
2809         group->count = 0;
2810 }
2811
2812 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2813 {
2814         struct hnae3_ring_chain_node vector_ring_chain;
2815         struct hnae3_handle *h = priv->ae_handle;
2816         struct hns3_enet_tqp_vector *tqp_vector;
2817         int i, ret;
2818
2819         for (i = 0; i < priv->vector_num; i++) {
2820                 tqp_vector = &priv->tqp_vector[i];
2821
2822                 ret = hns3_get_vector_ring_chain(tqp_vector,
2823                                                  &vector_ring_chain);
2824                 if (ret)
2825                         return ret;
2826
2827                 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2828                         tqp_vector->vector_irq, &vector_ring_chain);
2829                 if (ret)
2830                         return ret;
2831
2832                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2833
2834                 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2835                         (void)irq_set_affinity_hint(
2836                                 priv->tqp_vector[i].vector_irq,
2837                                 NULL);
2838                         free_irq(priv->tqp_vector[i].vector_irq,
2839                                  &priv->tqp_vector[i]);
2840                 }
2841
2842                 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2843                 hns3_clear_ring_group(&tqp_vector->rx_group);
2844                 hns3_clear_ring_group(&tqp_vector->tx_group);
2845                 netif_napi_del(&priv->tqp_vector[i].napi);
2846         }
2847
2848         return 0;
2849 }
2850
2851 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2852 {
2853         struct hnae3_handle *h = priv->ae_handle;
2854         struct pci_dev *pdev = h->pdev;
2855         int i, ret;
2856
2857         for (i = 0; i < priv->vector_num; i++) {
2858                 struct hns3_enet_tqp_vector *tqp_vector;
2859
2860                 tqp_vector = &priv->tqp_vector[i];
2861                 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2862                 if (ret)
2863                         return ret;
2864         }
2865
2866         devm_kfree(&pdev->dev, priv->tqp_vector);
2867         return 0;
2868 }
2869
2870 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2871                              int ring_type)
2872 {
2873         struct hns3_nic_ring_data *ring_data = priv->ring_data;
2874         int queue_num = priv->ae_handle->kinfo.num_tqps;
2875         struct pci_dev *pdev = priv->ae_handle->pdev;
2876         struct hns3_enet_ring *ring;
2877
2878         ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2879         if (!ring)
2880                 return -ENOMEM;
2881
2882         if (ring_type == HNAE3_RING_TYPE_TX) {
2883                 ring_data[q->tqp_index].ring = ring;
2884                 ring_data[q->tqp_index].queue_index = q->tqp_index;
2885                 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2886         } else {
2887                 ring_data[q->tqp_index + queue_num].ring = ring;
2888                 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2889                 ring->io_base = q->io_base;
2890         }
2891
2892         hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2893
2894         ring->tqp = q;
2895         ring->desc = NULL;
2896         ring->desc_cb = NULL;
2897         ring->dev = priv->dev;
2898         ring->desc_dma_addr = 0;
2899         ring->buf_size = q->buf_size;
2900         ring->desc_num = q->desc_num;
2901         ring->next_to_use = 0;
2902         ring->next_to_clean = 0;
2903
2904         return 0;
2905 }
2906
2907 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2908                               struct hns3_nic_priv *priv)
2909 {
2910         int ret;
2911
2912         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2913         if (ret)
2914                 return ret;
2915
2916         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2917         if (ret)
2918                 return ret;
2919
2920         return 0;
2921 }
2922
2923 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2924 {
2925         struct hnae3_handle *h = priv->ae_handle;
2926         struct pci_dev *pdev = h->pdev;
2927         int i, ret;
2928
2929         priv->ring_data =  devm_kzalloc(&pdev->dev,
2930                                         array3_size(h->kinfo.num_tqps,
2931                                                     sizeof(*priv->ring_data),
2932                                                     2),
2933                                         GFP_KERNEL);
2934         if (!priv->ring_data)
2935                 return -ENOMEM;
2936
2937         for (i = 0; i < h->kinfo.num_tqps; i++) {
2938                 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2939                 if (ret)
2940                         goto err;
2941         }
2942
2943         return 0;
2944 err:
2945         devm_kfree(&pdev->dev, priv->ring_data);
2946         return ret;
2947 }
2948
2949 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2950 {
2951         struct hnae3_handle *h = priv->ae_handle;
2952         int i;
2953
2954         for (i = 0; i < h->kinfo.num_tqps; i++) {
2955                 devm_kfree(priv->dev, priv->ring_data[i].ring);
2956                 devm_kfree(priv->dev,
2957                            priv->ring_data[i + h->kinfo.num_tqps].ring);
2958         }
2959         devm_kfree(priv->dev, priv->ring_data);
2960 }
2961
2962 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2963 {
2964         int ret;
2965
2966         if (ring->desc_num <= 0 || ring->buf_size <= 0)
2967                 return -EINVAL;
2968
2969         ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2970                                 GFP_KERNEL);
2971         if (!ring->desc_cb) {
2972                 ret = -ENOMEM;
2973                 goto out;
2974         }
2975
2976         ret = hns3_alloc_desc(ring);
2977         if (ret)
2978                 goto out_with_desc_cb;
2979
2980         if (!HNAE3_IS_TX_RING(ring)) {
2981                 ret = hns3_alloc_ring_buffers(ring);
2982                 if (ret)
2983                         goto out_with_desc;
2984         }
2985
2986         return 0;
2987
2988 out_with_desc:
2989         hns3_free_desc(ring);
2990 out_with_desc_cb:
2991         kfree(ring->desc_cb);
2992         ring->desc_cb = NULL;
2993 out:
2994         return ret;
2995 }
2996
2997 static void hns3_fini_ring(struct hns3_enet_ring *ring)
2998 {
2999         hns3_free_desc(ring);
3000         kfree(ring->desc_cb);
3001         ring->desc_cb = NULL;
3002         ring->next_to_clean = 0;
3003         ring->next_to_use = 0;
3004 }
3005
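/* Map an RX buffer size to the BD size type written to hardware; any other
 * size falls back to the 2048-byte type.
 */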
3006 static int hns3_buf_size2type(u32 buf_size)
3007 {
3008         int bd_size_type;
3009
3010         switch (buf_size) {
3011         case 512:
3012                 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3013                 break;
3014         case 1024:
3015                 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3016                 break;
3017         case 2048:
3018                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3019                 break;
3020         case 4096:
3021                 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3022                 break;
3023         default:
3024                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3025         }
3026
3027         return bd_size_type;
3028 }
3029
3030 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3031 {
3032         dma_addr_t dma = ring->desc_dma_addr;
3033         struct hnae3_queue *q = ring->tqp;
3034
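        /* The 64-bit descriptor base address is split across two 32-bit
         * registers; shifting by 31 and then by 1 avoids an undefined
         * shift by 32 when dma_addr_t is only 32 bits wide.
         */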
3035         if (!HNAE3_IS_TX_RING(ring)) {
3036                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3037                                (u32)dma);
3038                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3039                                (u32)((dma >> 31) >> 1));
3040
3041                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3042                                hns3_buf_size2type(ring->buf_size));
3043                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3044                                ring->desc_num / 8 - 1);
3045
3046         } else {
3047                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3048                                (u32)dma);
3049                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3050                                (u32)((dma >> 31) >> 1));
3051
3052                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3053                                ring->desc_num / 8 - 1);
3054         }
3055 }
3056
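/* For each enabled TC, program the TC register of every TX ring that is
 * mapped to it.
 */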
3057 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3058 {
3059         struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3060         int i;
3061
3062         for (i = 0; i < HNAE3_MAX_TC; i++) {
3063                 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3064                 int j;
3065
3066                 if (!tc_info->enable)
3067                         continue;
3068
3069                 for (j = 0; j < tc_info->tqp_count; j++) {
3070                         struct hnae3_queue *q;
3071
3072                         q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3073                         hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3074                                        tc_info->tc);
3075                 }
3076         }
3077 }
3078
3079 int hns3_init_all_ring(struct hns3_nic_priv *priv)
3080 {
3081         struct hnae3_handle *h = priv->ae_handle;
3082         int ring_num = h->kinfo.num_tqps * 2;
3083         int i, j;
3084         int ret;
3085
3086         for (i = 0; i < ring_num; i++) {
3087                 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3088                 if (ret) {
3089                         dev_err(priv->dev,
3090                                 "Alloc ring memory fail! ret=%d\n", ret);
3091                         goto out_when_alloc_ring_memory;
3092                 }
3093
3094                 u64_stats_init(&priv->ring_data[i].ring->syncp);
3095         }
3096
3097         return 0;
3098
3099 out_when_alloc_ring_memory:
3100         for (j = i - 1; j >= 0; j--)
3101                 hns3_fini_ring(priv->ring_data[j].ring);
3102
3103         return -ENOMEM;
3104 }
3105
3106 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3107 {
3108         struct hnae3_handle *h = priv->ae_handle;
3109         int i;
3110
3111         for (i = 0; i < h->kinfo.num_tqps; i++) {
3112                 if (h->ae_algo->ops->reset_queue)
3113                         h->ae_algo->ops->reset_queue(h, i);
3114
3115                 hns3_fini_ring(priv->ring_data[i].ring);
3116                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3117         }
3118         return 0;
3119 }
3120
3121 /* Set the MAC address if it is configured, or leave it to the AE driver */
3122 static void hns3_init_mac_addr(struct net_device *netdev, bool init)
3123 {
3124         struct hns3_nic_priv *priv = netdev_priv(netdev);
3125         struct hnae3_handle *h = priv->ae_handle;
3126         u8 mac_addr_temp[ETH_ALEN];
3127
3128         if (h->ae_algo->ops->get_mac_addr && init) {
3129                 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3130                 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3131         }
3132
3133         /* Check if the MAC address is valid; if not, get a random one */
3134         if (!is_valid_ether_addr(netdev->dev_addr)) {
3135                 eth_hw_addr_random(netdev);
3136                 dev_warn(priv->dev, "using random MAC address %pM\n",
3137                          netdev->dev_addr);
3138         }
3139
3140         if (h->ae_algo->ops->set_mac_addr)
3141                 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3142
3143 }
3144
3145 static void hns3_uninit_mac_addr(struct net_device *netdev)
3146 {
3147         struct hns3_nic_priv *priv = netdev_priv(netdev);
3148         struct hnae3_handle *h = priv->ae_handle;
3149
3150         if (h->ae_algo->ops->rm_uc_addr)
3151                 h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
3152 }
3153
3154 static int hns3_restore_fd_rules(struct net_device *netdev)
3155 {
3156         struct hnae3_handle *h = hns3_get_handle(netdev);
3157         int ret = 0;
3158
3159         if (h->ae_algo->ops->restore_fd_rules)
3160                 ret = h->ae_algo->ops->restore_fd_rules(h);
3161
3162         return ret;
3163 }
3164
3165 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3166 {
3167         struct hnae3_handle *h = hns3_get_handle(netdev);
3168
3169         if (h->ae_algo->ops->del_all_fd_entries)
3170                 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3171 }
3172
3173 static void hns3_nic_set_priv_ops(struct net_device *netdev)
3174 {
3175         struct hns3_nic_priv *priv = netdev_priv(netdev);
3176
3177         if ((netdev->features & NETIF_F_TSO) ||
3178             (netdev->features & NETIF_F_TSO6)) {
3179                 priv->ops.fill_desc = hns3_fill_desc_tso;
3180                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3181         } else {
3182                 priv->ops.fill_desc = hns3_fill_desc;
3183                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3184         }
3185 }
3186
3187 static int hns3_client_init(struct hnae3_handle *handle)
3188 {
3189         struct pci_dev *pdev = handle->pdev;
3190         u16 alloc_tqps, max_rss_size;
3191         struct hns3_nic_priv *priv;
3192         struct net_device *netdev;
3193         int ret;
3194
3195         handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3196                                                     &max_rss_size);
3197         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3198         if (!netdev)
3199                 return -ENOMEM;
3200
3201         priv = netdev_priv(netdev);
3202         priv->dev = &pdev->dev;
3203         priv->netdev = netdev;
3204         priv->ae_handle = handle;
3205         priv->ae_handle->last_reset_time = jiffies;
3206         priv->tx_timeout_count = 0;
3207
3208         handle->kinfo.netdev = netdev;
3209         handle->priv = (void *)priv;
3210
3211         hns3_init_mac_addr(netdev, true);
3212
3213         hns3_set_default_feature(netdev);
3214
3215         netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3216         netdev->priv_flags |= IFF_UNICAST_FLT;
3217         netdev->netdev_ops = &hns3_nic_netdev_ops;
3218         SET_NETDEV_DEV(netdev, &pdev->dev);
3219         hns3_ethtool_set_ops(netdev);
3220         hns3_nic_set_priv_ops(netdev);
3221
3222         /* Carrier off reporting is important to ethtool even BEFORE open */
3223         netif_carrier_off(netdev);
3224
3225         if (handle->flags & HNAE3_SUPPORT_VF)
3226                 handle->reset_level = HNAE3_VF_RESET;
3227         else
3228                 handle->reset_level = HNAE3_FUNC_RESET;
3229
3230         ret = hns3_get_ring_config(priv);
3231         if (ret) {
3232                 ret = -ENOMEM;
3233                 goto out_get_ring_cfg;
3234         }
3235
3236         ret = hns3_nic_alloc_vector_data(priv);
3237         if (ret) {
3238                 ret = -ENOMEM;
3239                 goto out_alloc_vector_data;
3240         }
3241
3242         ret = hns3_nic_init_vector_data(priv);
3243         if (ret) {
3244                 ret = -ENOMEM;
3245                 goto out_init_vector_data;
3246         }
3247
3248         ret = hns3_init_all_ring(priv);
3249         if (ret) {
3250                 ret = -ENOMEM;
3251                 goto out_init_ring_data;
3252         }
3253
3254         ret = register_netdev(netdev);
3255         if (ret) {
3256                 dev_err(priv->dev, "probe register netdev fail!\n");
3257                 goto out_reg_netdev_fail;
3258         }
3259
3260         hns3_dcbnl_setup(handle);
3261
3262         /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3263         netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3264
3265         return ret;
3266
3267 out_reg_netdev_fail:
3268 out_init_ring_data:
3269         (void)hns3_nic_uninit_vector_data(priv);
3270 out_init_vector_data:
3271         hns3_nic_dealloc_vector_data(priv);
3272 out_alloc_vector_data:
3273         priv->ring_data = NULL;
3274 out_get_ring_cfg:
3275         priv->ae_handle = NULL;
3276         free_netdev(netdev);
3277         return ret;
3278 }
3279
3280 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3281 {
3282         struct net_device *netdev = handle->kinfo.netdev;
3283         struct hns3_nic_priv *priv = netdev_priv(netdev);
3284         int ret;
3285
3286         if (netdev->reg_state != NETREG_UNINITIALIZED)
3287                 unregister_netdev(netdev);
3288
3289         hns3_force_clear_all_rx_ring(handle);
3290
3291         ret = hns3_nic_uninit_vector_data(priv);
3292         if (ret)
3293                 netdev_err(netdev, "uninit vector error\n");
3294
3295         ret = hns3_nic_dealloc_vector_data(priv);
3296         if (ret)
3297                 netdev_err(netdev, "dealloc vector error\n");
3298
3299         ret = hns3_uninit_all_ring(priv);
3300         if (ret)
3301                 netdev_err(netdev, "uninit ring error\n");
3302
3303         hns3_put_ring_config(priv);
3304
3305         priv->ring_data = NULL;
3306
3307         hns3_uninit_mac_addr(netdev);
3308
3309         free_netdev(netdev);
3310 }
3311
3312 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3313 {
3314         struct net_device *netdev = handle->kinfo.netdev;
3315
3316         if (!netdev)
3317                 return;
3318
3319         if (linkup) {
3320                 netif_carrier_on(netdev);
3321                 netif_tx_wake_all_queues(netdev);
3322                 netdev_info(netdev, "link up\n");
3323         } else {
3324                 netif_carrier_off(netdev);
3325                 netif_tx_stop_all_queues(netdev);
3326                 netdev_info(netdev, "link down\n");
3327         }
3328 }
3329
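/* Apply a new TC count: stop the netdev if it is running, let the DCB ops
 * update the TC to queue map, then refresh the real queue count and restart.
 */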
3330 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3331 {
3332         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3333         struct net_device *ndev = kinfo->netdev;
3334         bool if_running;
3335         int ret;
3336
3337         if (tc > HNAE3_MAX_TC)
3338                 return -EINVAL;
3339
3340         if (!ndev)
3341                 return -ENODEV;
3342
3343         if_running = netif_running(ndev);
3344
3345         if (if_running) {
3346                 (void)hns3_nic_net_stop(ndev);
3347                 msleep(100);
3348         }
3349
3350         ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3351                 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3352         if (ret)
3353                 goto err_out;
3354
3355         ret = hns3_nic_set_real_num_queue(ndev);
3356
3357 err_out:
3358         if (if_running)
3359                 (void)hns3_nic_net_open(ndev);
3360
3361         return ret;
3362 }
3363
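/* Re-sync the netdev's unicast and multicast address lists to the device,
 * e.g. after a reset has cleared the hardware MAC tables.
 */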
3364 static void hns3_recover_hw_addr(struct net_device *ndev)
3365 {
3366         struct netdev_hw_addr_list *list;
3367         struct netdev_hw_addr *ha, *tmp;
3368
3369         /* go through and sync uc_addr entries to the device */
3370         list = &ndev->uc;
3371         list_for_each_entry_safe(ha, tmp, &list->list, list)
3372                 hns3_nic_uc_sync(ndev, ha->addr);
3373
3374         /* go through and sync mc_addr entries to the device */
3375         list = &ndev->mc;
3376         list_for_each_entry_safe(ha, tmp, &list->list, list)
3377                 hns3_nic_mc_sync(ndev, ha->addr);
3378 }
3379
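/* Detach and free any TX buffers still pending between next_to_clean and
 * next_to_use.
 */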
3380 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3381 {
3382         while (ring->next_to_clean != ring->next_to_use) {
3383                 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3384                 hns3_free_buffer_detach(ring, ring->next_to_clean);
3385                 ring_ptr_move_fw(ring, next_to_clean);
3386         }
3387 }
3388
3389 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3390 {
3391         struct hns3_desc_cb res_cbs;
3392         int ret;
3393
3394         while (ring->next_to_use != ring->next_to_clean) {
3395                 /* When a buffer is not reused, its memory has been
3396                  * freed in hns3_handle_rx_bd or will be freed by the
3397                  * stack, so we need to replace the buffer here.
3398                  */
3399                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3400                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
3401                         if (ret) {
3402                                 u64_stats_update_begin(&ring->syncp);
3403                                 ring->stats.sw_err_cnt++;
3404                                 u64_stats_update_end(&ring->syncp);
3405                                 /* If allocating a new buffer fails, exit
3406                                  * directly and re-clear in the up flow.
3407                                  */
3408                                 netdev_warn(ring->tqp->handle->kinfo.netdev,
3409                                             "reserve buffer map failed, ret = %d\n",
3410                                             ret);
3411                                 return ret;
3412                         }
3413                         hns3_replace_buffer(ring, ring->next_to_use,
3414                                             &res_cbs);
3415                 }
3416                 ring_ptr_move_fw(ring, next_to_use);
3417         }
3418
3419         return 0;
3420 }
3421
3422 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3423 {
3424         while (ring->next_to_use != ring->next_to_clean) {
3425                 /* When a buffer is not reused, its memory has been
3426                  * freed in hns3_handle_rx_bd or will be freed by the
3427                  * stack, so we only need to unmap the buffer here.
3428                  */
3429                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3430                         hns3_unmap_buffer(ring,
3431                                           &ring->desc_cb[ring->next_to_use]);
3432                         ring->desc_cb[ring->next_to_use].dma = 0;
3433                 }
3434
3435                 ring_ptr_move_fw(ring, next_to_use);
3436         }
3437 }
3438
3439 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3440 {
3441         struct net_device *ndev = h->kinfo.netdev;
3442         struct hns3_nic_priv *priv = netdev_priv(ndev);
3443         struct hns3_enet_ring *ring;
3444         u32 i;
3445
3446         for (i = 0; i < h->kinfo.num_tqps; i++) {
3447                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3448                 hns3_force_clear_rx_ring(ring);
3449         }
3450 }
3451
3452 static void hns3_clear_all_ring(struct hnae3_handle *h)
3453 {
3454         struct net_device *ndev = h->kinfo.netdev;
3455         struct hns3_nic_priv *priv = netdev_priv(ndev);
3456         u32 i;
3457
3458         for (i = 0; i < h->kinfo.num_tqps; i++) {
3459                 struct netdev_queue *dev_queue;
3460                 struct hns3_enet_ring *ring;
3461
3462                 ring = priv->ring_data[i].ring;
3463                 hns3_clear_tx_ring(ring);
3464                 dev_queue = netdev_get_tx_queue(ndev,
3465                                                 priv->ring_data[i].queue_index);
3466                 netdev_tx_reset_queue(dev_queue);
3467
3468                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3469                 /* Continue to clear other rings even if clearing some
3470                  * rings failed.
3471                  */
3472                 hns3_clear_rx_ring(ring);
3473         }
3474 }
3475
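/* Reset every TQP and reinitialize its ring hardware state: TX rings are
 * drained and all RX descriptors are handed back to hardware for reuse.
 */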
3476 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3477 {
3478         struct net_device *ndev = h->kinfo.netdev;
3479         struct hns3_nic_priv *priv = netdev_priv(ndev);
3480         struct hns3_enet_ring *rx_ring;
3481         int i, j;
3482         int ret;
3483
3484         for (i = 0; i < h->kinfo.num_tqps; i++) {
3485                 h->ae_algo->ops->reset_queue(h, i);
3486                 hns3_init_ring_hw(priv->ring_data[i].ring);
3487
3488                 /* We need to clear the tx ring here because the self test
3489                  * uses the ring and does not run down before up
3490                  */
3491                 hns3_clear_tx_ring(priv->ring_data[i].ring);
3492                 priv->ring_data[i].ring->next_to_clean = 0;
3493                 priv->ring_data[i].ring->next_to_use = 0;
3494
3495                 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3496                 hns3_init_ring_hw(rx_ring);
3497                 ret = hns3_clear_rx_ring(rx_ring);
3498                 if (ret)
3499                         return ret;
3500
3501                 /* We cannot know the hardware head and tail when this
3502                  * function is called in the reset flow, so reuse all desc.
3503                  */
3504                 for (j = 0; j < rx_ring->desc_num; j++)
3505                         hns3_reuse_buffer(rx_ring, j);
3506
3507                 rx_ring->next_to_clean = 0;
3508                 rx_ring->next_to_use = 0;
3509         }
3510
3511         hns3_init_tx_ring_tc(priv);
3512
3513         return 0;
3514 }
3515
3516 static void hns3_store_coal(struct hns3_nic_priv *priv)
3517 {
3518         /* ethtool only supports setting and querying one coalesce
3519          * configuration for now, so save vector 0's coalesce
3520          * configuration here in order to restore it.
3521          */
3522         memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
3523                sizeof(struct hns3_enet_coalesce));
3524         memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
3525                sizeof(struct hns3_enet_coalesce));
3526 }
3527
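/* Copy the saved coalesce configuration back to every vector's TX and RX
 * groups.
 */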
3528 static void hns3_restore_coal(struct hns3_nic_priv *priv)
3529 {
3530         u16 vector_num = priv->vector_num;
3531         int i;
3532
3533         for (i = 0; i < vector_num; i++) {
3534                 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
3535                        sizeof(struct hns3_enet_coalesce));
3536                 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
3537                        sizeof(struct hns3_enet_coalesce));
3538         }
3539 }
3540
3541 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3542 {
3543         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3544         struct net_device *ndev = kinfo->netdev;
3545
3546         if (!netif_running(ndev))
3547                 return 0;
3548
3549         return hns3_nic_net_stop(ndev);
3550 }
3551
3552 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3553 {
3554         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3555         int ret = 0;
3556
3557         if (netif_running(kinfo->netdev)) {
3558                 ret = hns3_nic_net_up(kinfo->netdev);
3559                 if (ret) {
3560                         netdev_err(kinfo->netdev,
3561                                    "hns net up fail, ret=%d!\n", ret);
3562                         return ret;
3563                 }
3564                 handle->last_reset_time = jiffies;
3565         }
3566
3567         return ret;
3568 }
3569
3570 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3571 {
3572         struct net_device *netdev = handle->kinfo.netdev;
3573         struct hns3_nic_priv *priv = netdev_priv(netdev);
3574         int ret;
3575
3576         hns3_init_mac_addr(netdev, false);
3577         hns3_nic_set_rx_mode(netdev);
3578         hns3_recover_hw_addr(netdev);
3579
3580         /* The hardware table is only cleared when the PF resets */
3581         if (!(handle->flags & HNAE3_SUPPORT_VF))
3582                 hns3_restore_vlan(netdev);
3583
3584         hns3_restore_fd_rules(netdev);
3585
3586         /* Carrier off reporting is important to ethtool even BEFORE open */
3587         netif_carrier_off(netdev);
3588
3589         hns3_restore_coal(priv);
3590
3591         ret = hns3_nic_init_vector_data(priv);
3592         if (ret)
3593                 return ret;
3594
3595         ret = hns3_init_all_ring(priv);
3596         if (ret) {
3597                 hns3_nic_uninit_vector_data(priv);
3598                 priv->ring_data = NULL;
3599         }
3600
3601         return ret;
3602 }
3603
3604 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3605 {
3606         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
3607         struct net_device *netdev = handle->kinfo.netdev;
3608         struct hns3_nic_priv *priv = netdev_priv(netdev);
3609         int ret;
3610
3611         hns3_force_clear_all_rx_ring(handle);
3612
3613         ret = hns3_nic_uninit_vector_data(priv);
3614         if (ret) {
3615                 netdev_err(netdev, "uninit vector error\n");
3616                 return ret;
3617         }
3618
3619         hns3_store_coal(priv);
3620
3621         ret = hns3_uninit_all_ring(priv);
3622         if (ret)
3623                 netdev_err(netdev, "uninit ring error\n");
3624
3625         hns3_uninit_mac_addr(netdev);
3626
3627         /* It is cumbersome for hardware to pick-and-choose rules for
3628          * deletion from the TCAM. Hence, for a function reset, software
3629          * intervention is required to delete the rules.
3630          */
3631         if (hns3_dev_ongoing_func_reset(ae_dev))
3632                 hns3_del_all_fd_rules(netdev, false);
3633
3634         return ret;
3635 }
3636
3637 static int hns3_reset_notify(struct hnae3_handle *handle,
3638                              enum hnae3_reset_notify_type type)
3639 {
3640         int ret = 0;
3641
3642         switch (type) {
3643         case HNAE3_UP_CLIENT:
3644                 ret = hns3_reset_notify_up_enet(handle);
3645                 break;
3646         case HNAE3_DOWN_CLIENT:
3647                 ret = hns3_reset_notify_down_enet(handle);
3648                 break;
3649         case HNAE3_INIT_CLIENT:
3650                 ret = hns3_reset_notify_init_enet(handle);
3651                 break;
3652         case HNAE3_UNINIT_CLIENT:
3653                 ret = hns3_reset_notify_uninit_enet(handle);
3654                 break;
3655         default:
3656                 break;
3657         }
3658
3659         return ret;
3660 }
3661
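/* Rebuild rings and vectors for a new TQP count; the caller is expected to
 * have torn down the old rings and vectors beforehand.
 */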
3662 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
3663 {
3664         struct hns3_nic_priv *priv = netdev_priv(netdev);
3665         struct hnae3_handle *h = hns3_get_handle(netdev);
3666         int ret;
3667
3668         ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3669         if (ret)
3670                 return ret;
3671
3672         ret = hns3_get_ring_config(priv);
3673         if (ret)
3674                 return ret;
3675
3676         ret = hns3_nic_alloc_vector_data(priv);
3677         if (ret)
3678                 goto err_alloc_vector;
3679
3680         hns3_restore_coal(priv);
3681
3682         ret = hns3_nic_init_vector_data(priv);
3683         if (ret)
3684                 goto err_uninit_vector;
3685
3686         ret = hns3_init_all_ring(priv);
3687         if (ret)
3688                 goto err_put_ring;
3689
3690         return 0;
3691
3692 err_put_ring:
3693         hns3_put_ring_config(priv);
3694 err_uninit_vector:
3695         hns3_nic_uninit_vector_data(priv);
3696 err_alloc_vector:
3697         hns3_nic_dealloc_vector_data(priv);
3698         return ret;
3699 }
3700
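/* Round the requested queue count down to a multiple of the TC count. */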
3701 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3702 {
3703         return (new_tqp_num / num_tc) * num_tc;
3704 }
3705
3706 int hns3_set_channels(struct net_device *netdev,
3707                       struct ethtool_channels *ch)
3708 {
3709         struct hns3_nic_priv *priv = netdev_priv(netdev);
3710         struct hnae3_handle *h = hns3_get_handle(netdev);
3711         struct hnae3_knic_private_info *kinfo = &h->kinfo;
3712         bool if_running = netif_running(netdev);
3713         u32 new_tqp_num = ch->combined_count;
3714         u16 org_tqp_num;
3715         int ret;
3716
3717         if (ch->rx_count || ch->tx_count)
3718                 return -EINVAL;
3719
3720         if (new_tqp_num > hns3_get_max_available_channels(h) ||
3721             new_tqp_num < kinfo->num_tc) {
3722                 dev_err(&netdev->dev,
3723                         "Change tqps fail, the tqp range is from %d to %d",
3724                         kinfo->num_tc,
3725                         hns3_get_max_available_channels(h));
3726                 return -EINVAL;
3727         }
3728
3729         new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3730         if (kinfo->num_tqps == new_tqp_num)
3731                 return 0;
3732
3733         if (if_running)
3734                 hns3_nic_net_stop(netdev);
3735
3736         ret = hns3_nic_uninit_vector_data(priv);
3737         if (ret) {
3738                 dev_err(&netdev->dev,
3739                         "Unbind vector with tqp fail, nothing is changed");
3740                 goto open_netdev;
3741         }
3742
3743         hns3_store_coal(priv);
3744
3745         hns3_nic_dealloc_vector_data(priv);
3746
3747         hns3_uninit_all_ring(priv);
3748         hns3_put_ring_config(priv);
3749
3750         org_tqp_num = h->kinfo.num_tqps;
3751         ret = hns3_modify_tqp_num(netdev, new_tqp_num);
3752         if (ret) {
3753                 ret = hns3_modify_tqp_num(netdev, org_tqp_num);
3754                 if (ret) {
3755                         /* If revert to old tqp failed, fatal error occurred */
3756                         dev_err(&netdev->dev,
3757                                 "Revert to old tqp num fail, ret=%d", ret);
3758                         return ret;
3759                 }
3760                 dev_info(&netdev->dev,
3761                          "Change tqp num fail, Revert to old tqp num");
3762         }
3763
3764 open_netdev:
3765         if (if_running)
3766                 hns3_nic_net_open(netdev);
3767
3768         return ret;
3769 }
3770
3771 static const struct hnae3_client_ops client_ops = {
3772         .init_instance = hns3_client_init,
3773         .uninit_instance = hns3_client_uninit,
3774         .link_status_change = hns3_link_status_change,
3775         .setup_tc = hns3_client_setup_tc,
3776         .reset_notify = hns3_reset_notify,
3777 };
3778
3779 /* hns3_init_module - Driver registration routine
3780  * hns3_init_module is the first routine called when the driver is
3781  * loaded. It registers the KNIC client and then the PCI driver.
3782  */
3783 static int __init hns3_init_module(void)
3784 {
3785         int ret;
3786
3787         pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3788         pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3789
3790         client.type = HNAE3_CLIENT_KNIC;
3791         snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3792                  hns3_driver_name);
3793
3794         client.ops = &client_ops;
3795
3796         INIT_LIST_HEAD(&client.node);
3797
3798         ret = hnae3_register_client(&client);
3799         if (ret)
3800                 return ret;
3801
3802         ret = pci_register_driver(&hns3_driver);
3803         if (ret)
3804                 hnae3_unregister_client(&client);
3805
3806         return ret;
3807 }
3808 module_init(hns3_init_module);
3809
3810 /* hns3_exit_module - Driver exit cleanup routine
3811  * hns3_exit_module is called just before the driver is removed
3812  * from memory.
3813  */
3814 static void __exit hns3_exit_module(void)
3815 {
3816         pci_unregister_driver(&hns3_driver);
3817         hnae3_unregister_client(&client);
3818 }
3819 module_exit(hns3_exit_module);
3820
3821 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3822 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3823 MODULE_LICENSE("GPL");
3824 MODULE_ALIAS("pci:hns-nic");
3825 MODULE_VERSION(HNS3_MOD_VERSION);