/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"

#define TX_IRQ_NO_PENDING		0
#define TX_IRQ_NO_COALESC		0
#define TX_IRQ_NO_LLI_TIMER		0
#define TX_IRQ_NO_CREDIT		0
#define TX_IRQ_NO_RESEND_TIMER		0

#define CI_UPDATE_NO_PENDING		0
#define CI_UPDATE_NO_COALESC		0

#define HW_CONS_IDX(sq)			be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN			17

#define MAX_PAYLOAD_OFFSET		221
#define TRANSPORT_OFFSET(l4_hdr, skb)	((u32)((l4_hdr) - (skb)->data))
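
/* Header overlay unions used by the offload helpers below.  The original
 * definitions were elided in this copy; they are reconstructed here from the
 * way ip->v4/v6/hdr and l4->tcp/udp/hdr are dereferenced further down, so
 * treat the exact member set as an assumption rather than verbatim source.
 */
union hinic_l3 {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union hinic_l4 {
	struct tcphdr *tcp;
	struct udphdr *udp;
	unsigned char *hdr;
};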

enum hinic_offload_type {
	TX_OFFLOAD_TSO     = BIT(0),
	TX_OFFLOAD_CSUM    = BIT(1),
	TX_OFFLOAD_VLAN    = BIT(2),
	TX_OFFLOAD_INVALID = BIT(3),
};

/**
 * hinic_txq_clean_stats - Clean the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts    = 0;
	txq_stats->bytes   = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts    = txq_stats->pkts;
		stats->bytes   = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}
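
/* Note on the loop above: u64_stats_fetch_begin()/u64_stats_fetch_retry()
 * form a seqcount-style read section, so the copy is simply retried if a
 * writer updated the counters concurrently.  This keeps the 64-bit counters
 * consistent on 32-bit SMP hosts without taking a lock on the hot Tx path.
 */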

/**
 * txq_stats_init - Initialize the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct skb_frag_struct *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);
	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}
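
/* sges[0] always describes the linear part of the skb and sges[1..nr_frags]
 * the paged fragments, which is why index 0 is unmapped with
 * dma_unmap_single() while the rest use dma_unmap_page() here and in
 * tx_unmap_skb() below.
 */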

/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
				 union hinic_l4 *l4,
				 enum hinic_offload_type offload_type,
				 enum hinic_l3_offload_type *l3_type,
				 u8 *l4_proto)
{
	u8 *exthdr;

	if (ip->v4->version == 4) {
		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		*l4_proto = ip->v4->protocol;
	} else if (ip->v4->version == 6) {
		*l3_type = IPV6_PKT;
		exthdr = ip->hdr + sizeof(*ip->v6);
		*l4_proto = ip->v6->nexthdr;
		if (exthdr != l4->hdr) {
			int start = exthdr - skb->data;
			__be16 frag_off;

			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
		}
	} else {
		*l3_type = L3TYPE_UNKNOWN;
		*l4_proto = 0;
	}
}
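
/* For IPv6, the field after the fixed header may name an extension header
 * rather than the transport protocol.  ipv6_skip_exthdr() walks the
 * extension-header chain starting at 'start' and rewrites *l4_proto with the
 * real upper-layer protocol so the correct L4 offload type can be selected.
 */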

static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
			      enum hinic_offload_type offload_type, u8 l4_proto,
			      enum hinic_l4_offload_type *l4_offload,
			      u32 *l4_len, u32 *offset)
{
	*l4_offload = OFFLOAD_DISABLE;
	*offset = 0;
	*l4_len = 0;

	switch (l4_proto) {
	case IPPROTO_TCP:
		*l4_offload = TCP_OFFLOAD_ENABLE;
		/* doff is in units of 4 bytes */
		*l4_len = l4->tcp->doff * 4;
		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
		break;
	case IPPROTO_UDP:
		*l4_offload = UDP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct udphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;
	case IPPROTO_SCTP:
		/* only csum offload supports sctp */
		if (offload_type != TX_OFFLOAD_CSUM)
			break;

		*l4_offload = SCTP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct sctphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;
	default:
		break;
	}
}

static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
	return (ip->v4->version == 4) ?
	       csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
	       csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
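
/* csum_magic() computes the TCP/UDP pseudo-header checksum with a zero
 * length and zero payload sum.  The TSO/checksum paths below store its
 * complement in the L4 checksum field, the usual seeding scheme so that the
 * hardware only has to fold in the payload and real length per segment.
 */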

static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
		       struct sk_buff *skb)
{
	u32 offset, l4_len, ip_identify, network_hdr_len;
	enum hinic_l3_offload_type l3_offload;
	enum hinic_l4_offload_type l4_offload;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -EPROTONOSUPPORT;

	if (skb->encapsulation) {
		u32 gso_type = skb_shinfo(skb)->gso_type;
		u32 tunnel_type = 0;
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		} else if (ip.v4->version == 6) {
			l3_offload = IPV6_PKT;
		} else {
			l3_offload = L3TYPE_UNKNOWN;
		}

		hinic_task_set_outter_l3(task, l3_offload,
					 skb_network_header_len(skb));

		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
			tunnel_type = TUNNEL_UDP_CSUM;
		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
			tunnel_type = TUNNEL_UDP_NO_CSUM;
		}

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	/* initialize inner IP header fields */
	if (ip.v4->version == 4)
		ip.v4->tot_len = 0;
	else
		ip.v6->payload_len = 0;

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

	ip_identify = 0;
	if (l4_proto == IPPROTO_TCP)
		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
			       ip_identify, skb_shinfo(skb)->gso_size);

	return 1;
}

static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
			struct sk_buff *skb)
{
	enum hinic_l4_offload_type l4_offload;
	u32 offset, l4_len, network_hdr_len;
	enum hinic_l3_offload_type l3_type;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->encapsulation) {
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		if (ip.v4->version == 4)
			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
		else if (ip.v4->version == 6)
			l3_type = IPV6_PKT;
		else
			l3_type = L3TYPE_UNKNOWN;

		hinic_task_set_outter_l3(task, l3_type,
					 skb_network_header_len(skb));

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
					 l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

	return 1;
}
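
/* Return convention (as reconstructed above): offload_tso() and
 * offload_csum() return 1 when they programmed the task section, 0 when the
 * skb does not need that offload, and a negative errno on failure.
 * hinic_tx_offload() below branches on exactly these three cases.
 */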

static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
			    u32 *queue_info)
{
	enum hinic_offload_type offload = 0;
	int enabled;

	enabled = offload_tso(task, queue_info, skb);
	if (enabled > 0) {
		offload |= TX_OFFLOAD_TSO;
	} else if (enabled == 0) {
		enabled = offload_csum(task, queue_info, skb);
		if (enabled)
			offload |= TX_OFFLOAD_CSUM;
	} else {
		return -EPROTONOSUPPORT;
	}

	if (offload)
		hinic_task_set_l2hdr(task, skb_network_offset(skb));

	/* payload offset must not exceed MAX_PAYLOAD_OFFSET (221) */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
	    MAX_PAYLOAD_OFFSET) {
		return -EPROTONOSUPPORT;
	}

	/* MSS must not be less than HINIC_MSS_MIN (80) */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
	}

	return 0;
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case where free_tx_poll runs on another CPU
		 * and we stopped the subqueue after its check.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
	if (err)
		goto offload_error;

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

offload_error:
	hinic_sq_return_wqe(txq->sq, wqe_size);
	tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);
	return NETDEV_TX_OK;
}
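
/* Doorbell batching: the doorbell is only rung when the stack indicates no
 * further skbs are queued behind this one (xmit_more is clear) or the queue
 * has been stopped; otherwise consecutive WQEs share one doorbell write,
 * reducing MMIO traffic under bursty transmit load.
 */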

/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
		if (!sq_wqe)
			break;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;
		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
		hinic_sq_put_wqe(sq, wqe_size);
		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}

/**
 * free_tx_poll - free finished tx skbs in the tx queue connected to the napi
 * @napi: napi
 * @budget: max number of Tx completions to process in one poll
 *
 * Return number of packets cleaned (at most @budget)
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		/* Read a WQEBB to get the real WQE size and consumer index. */
		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
		if ((!sq_wqe) ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		/* If this WQE has multiple WQEBBs, read it again to get the
		 * full-size WQE.
		 */
		if (wqe_size > wq->wqebb_size) {
			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
			if (unlikely(!sq_wqe))
				break;
		}

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;
		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
		hinic_sq_put_wqe(sq, wqe_size);
		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());
		netif_wake_subqueue(nic_dev->netdev, qp->q_id);
		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   sq->msix_entry,
					   HINIC_MSIX_ENABLE);
		return pkts;
	}

	return budget;
}
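
/* NAPI contract: processing fewer than 'budget' completions means the queue
 * is drained, so the poll completes NAPI and unmasks the Tx MSI-X vector;
 * otherwise it returns 'budget' and is rescheduled without re-enabling the
 * interrupt.
 */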

static void tx_napi_add(struct hinic_txq *txq, int weight)
{
	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
	napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
	napi_disable(&txq->napi);
	netif_napi_del(&txq->napi);
}

static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	/* Disable the interrupt until NAPI processing completes */
	hinic_hwdev_set_msix_state(nic_dev->hwdev,
				   txq->sq->msix_entry,
				   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	int err;

	tx_napi_add(txq, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		tx_napi_del(txq);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	tx_napi_del(txq);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}
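
/* irq_name sizing above: snprintf(NULL, 0, ...) returns the number of
 * characters the formatted name needs, and the extra byte covers the
 * terminating NUL, so the devm allocation is an exact fit for
 * "hinic_txq<q_id>".
 */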

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}