1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
4 /* The driver transmit and receive code */
6 #include <linux/prefetch.h>
10 #define ICE_RX_HDR_SIZE 256
13 * ice_unmap_and_free_tx_buf - Release a Tx buffer
14 * @ring: the ring that owns the buffer
15 * @tx_buf: the buffer to free
18 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
20 if (tx_buf->skb) {
21 dev_kfree_skb_any(tx_buf->skb);
22 if (dma_unmap_len(tx_buf, len))
23 dma_unmap_single(ring->dev,
24 dma_unmap_addr(tx_buf, dma),
25 dma_unmap_len(tx_buf, len),
26 DMA_TO_DEVICE);
27 } else if (dma_unmap_len(tx_buf, len)) {
28 dma_unmap_page(ring->dev,
29 dma_unmap_addr(tx_buf, dma),
30 dma_unmap_len(tx_buf, len),
31 DMA_TO_DEVICE);
32 }
34 tx_buf->next_to_watch = NULL;
36 dma_unmap_len_set(tx_buf, len, 0);
37 /* tx_buf must be completely set up in the transmit path */
40 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
42 return netdev_get_tx_queue(ring->netdev, ring->q_index);
46 * ice_clean_tx_ring - Free any empty Tx buffers
47 * @tx_ring: ring to be cleaned
49 void ice_clean_tx_ring(struct ice_ring *tx_ring)
53 /* ring already cleared, nothing to do */
54 if (!tx_ring->tx_buf)
55 return;
57 /* Free all the Tx ring sk_buffs */
58 for (i = 0; i < tx_ring->count; i++)
59 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
61 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
63 /* Zero out the descriptor ring */
64 memset(tx_ring->desc, 0, tx_ring->size);
66 tx_ring->next_to_use = 0;
67 tx_ring->next_to_clean = 0;
72 /* cleanup Tx queue statistics */
73 netdev_tx_reset_queue(txring_txq(tx_ring));
77 * ice_free_tx_ring - Free Tx resources per queue
78 * @tx_ring: Tx descriptor ring for a specific queue
80 * Free all transmit software resources
82 void ice_free_tx_ring(struct ice_ring *tx_ring)
84 ice_clean_tx_ring(tx_ring);
85 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
86 tx_ring->tx_buf = NULL;
88 if (tx_ring->desc) {
89 dmam_free_coherent(tx_ring->dev, tx_ring->size,
90 tx_ring->desc, tx_ring->dma);
91 tx_ring->desc = NULL;
92 }
96 * ice_clean_tx_irq - Reclaim resources after transmit completes
97 * @vsi: the VSI we care about
98 * @tx_ring: Tx ring to clean
99 * @napi_budget: Used to determine if we are in netpoll
101 * Returns true if there's any budget left (i.e. the clean is finished)
103 static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
106 unsigned int total_bytes = 0, total_pkts = 0;
107 unsigned int budget = vsi->work_lmt;
108 s16 i = tx_ring->next_to_clean;
109 struct ice_tx_desc *tx_desc;
110 struct ice_tx_buf *tx_buf;
112 tx_buf = &tx_ring->tx_buf[i];
113 tx_desc = ICE_TX_DESC(tx_ring, i);
117 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
119 /* if next_to_watch is not set then there is no work pending */
123 smp_rmb(); /* prevent any other reads prior to eop_desc */
125 /* if the descriptor isn't done, no work yet to do */
126 if (!(eop_desc->cmd_type_offset_bsz &
127 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
128 break;
130 /* clear next_to_watch to prevent false hangs */
131 tx_buf->next_to_watch = NULL;
133 /* update the statistics for this packet */
134 total_bytes += tx_buf->bytecount;
135 total_pkts += tx_buf->gso_segs;
138 napi_consume_skb(tx_buf->skb, napi_budget);
140 /* unmap skb header data */
141 dma_unmap_single(tx_ring->dev,
142 dma_unmap_addr(tx_buf, dma),
143 dma_unmap_len(tx_buf, len),
144 DMA_TO_DEVICE);
146 /* clear tx_buf data */
148 dma_unmap_len_set(tx_buf, len, 0);
150 /* unmap remaining buffers */
151 while (tx_desc != eop_desc) {
157 tx_buf = tx_ring->tx_buf;
158 tx_desc = ICE_TX_DESC(tx_ring, 0);
161 /* unmap any remaining paged data */
162 if (dma_unmap_len(tx_buf, len)) {
163 dma_unmap_page(tx_ring->dev,
164 dma_unmap_addr(tx_buf, dma),
165 dma_unmap_len(tx_buf, len),
166 DMA_TO_DEVICE);
167 dma_unmap_len_set(tx_buf, len, 0);
168 }
171 /* move us one more past the eop_desc for start of next pkt */
177 tx_buf = tx_ring->tx_buf;
178 tx_desc = ICE_TX_DESC(tx_ring, 0);
183 /* update budget accounting */
185 } while (likely(budget));
188 tx_ring->next_to_clean = i;
189 u64_stats_update_begin(&tx_ring->syncp);
190 tx_ring->stats.bytes += total_bytes;
191 tx_ring->stats.pkts += total_pkts;
192 u64_stats_update_end(&tx_ring->syncp);
193 tx_ring->q_vector->tx.total_bytes += total_bytes;
194 tx_ring->q_vector->tx.total_pkts += total_pkts;
196 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
197 total_bytes);
199 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
200 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
201 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
202 /* Make sure that anybody stopping the queue after this
203 * sees the new next_to_clean.
205 smp_mb();
206 if (__netif_subqueue_stopped(tx_ring->netdev,
207 tx_ring->q_index) &&
208 !test_bit(__ICE_DOWN, vsi->state)) {
209 netif_wake_subqueue(tx_ring->netdev,
210 tx_ring->q_index);
211 ++tx_ring->tx_stats.restart_q;
219 * ice_setup_tx_ring - Allocate the Tx descriptors
220 * @tx_ring: the Tx ring to set up
222 * Return 0 on success, negative on error
224 int ice_setup_tx_ring(struct ice_ring *tx_ring)
226 struct device *dev = tx_ring->dev;
231 /* warn if we are about to overwrite the pointer */
232 WARN_ON(tx_ring->tx_buf);
233 tx_ring->tx_buf =
234 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
235 GFP_KERNEL);
236 if (!tx_ring->tx_buf)
239 /* round up to nearest 4K */
240 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
242 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
244 if (!tx_ring->desc) {
245 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
246 tx_ring->size);
250 tx_ring->next_to_use = 0;
251 tx_ring->next_to_clean = 0;
252 tx_ring->tx_stats.prev_pkt = -1;
256 devm_kfree(dev, tx_ring->tx_buf);
257 tx_ring->tx_buf = NULL;
262 * ice_clean_rx_ring - Free Rx buffers
263 * @rx_ring: ring to be cleaned
265 void ice_clean_rx_ring(struct ice_ring *rx_ring)
267 struct device *dev = rx_ring->dev;
270 /* ring already cleared, nothing to do */
271 if (!rx_ring->rx_buf)
274 /* Free all the Rx ring sk_buffs */
275 for (i = 0; i < rx_ring->count; i++) {
276 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
279 dev_kfree_skb(rx_buf->skb);
285 dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
286 __free_pages(rx_buf->page, 0);
289 rx_buf->page_offset = 0;
292 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
294 /* Zero out the descriptor ring */
295 memset(rx_ring->desc, 0, rx_ring->size);
297 rx_ring->next_to_alloc = 0;
298 rx_ring->next_to_clean = 0;
299 rx_ring->next_to_use = 0;
303 * ice_free_rx_ring - Free Rx resources
304 * @rx_ring: ring to clean the resources from
306 * Free all receive software resources
308 void ice_free_rx_ring(struct ice_ring *rx_ring)
310 ice_clean_rx_ring(rx_ring);
311 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
312 rx_ring->rx_buf = NULL;
314 if (rx_ring->desc) {
315 dmam_free_coherent(rx_ring->dev, rx_ring->size,
316 rx_ring->desc, rx_ring->dma);
317 rx_ring->desc = NULL;
318 }
322 * ice_setup_rx_ring - Allocate the Rx descriptors
323 * @rx_ring: the Rx ring to set up
325 * Return 0 on success, negative on error
327 int ice_setup_rx_ring(struct ice_ring *rx_ring)
329 struct device *dev = rx_ring->dev;
334 /* warn if we are about to overwrite the pointer */
335 WARN_ON(rx_ring->rx_buf);
336 rx_ring->rx_buf =
337 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
338 GFP_KERNEL);
339 if (!rx_ring->rx_buf)
342 /* round up to nearest 4K */
343 rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
344 rx_ring->size = ALIGN(rx_ring->size, 4096);
345 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
347 if (!rx_ring->desc) {
348 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
349 rx_ring->size);
353 rx_ring->next_to_use = 0;
354 rx_ring->next_to_clean = 0;
358 devm_kfree(dev, rx_ring->rx_buf);
359 rx_ring->rx_buf = NULL;
364 * ice_release_rx_desc - Store the new tail and head values
365 * @rx_ring: ring to bump
366 * @val: new head index
368 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
370 rx_ring->next_to_use = val;
372 /* update next to alloc since we have filled the ring */
373 rx_ring->next_to_alloc = val;
375 /* Force memory writes to complete before letting h/w
376 * know there are new descriptors to fetch. (Only
377 * applicable for weak-ordered memory model archs,
378 * such as IA-64).
380 wmb();
381 writel(val, rx_ring->tail);
385 * ice_alloc_mapped_page - recycle or make a new page
386 * @rx_ring: ring to use
387 * @bi: rx_buf struct to modify
389 * Returns true if the page was successfully allocated or
390 * reused.
392 static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
393 struct ice_rx_buf *bi)
395 struct page *page = bi->page;
398 /* since we are recycling buffers we should seldom need to alloc */
400 rx_ring->rx_stats.page_reuse_count++;
404 /* alloc new page for storage */
405 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
406 if (unlikely(!page)) {
407 rx_ring->rx_stats.alloc_page_failed++;
411 /* map page for use */
412 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
414 /* if mapping failed free memory back to system since
415 * there isn't much point in holding memory we can't use
417 if (dma_mapping_error(rx_ring->dev, dma)) {
418 __free_pages(page, 0);
419 rx_ring->rx_stats.alloc_page_failed++;
431 * ice_alloc_rx_bufs - Replace used receive buffers
432 * @rx_ring: ring to place buffers on
433 * @cleaned_count: number of buffers to replace
435 * Returns false if all allocations were successful, true if any fail
437 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
439 union ice_32b_rx_flex_desc *rx_desc;
440 u16 ntu = rx_ring->next_to_use;
441 struct ice_rx_buf *bi;
443 /* do nothing if no valid netdev defined */
444 if (!rx_ring->netdev || !cleaned_count)
447 /* get the RX descriptor and buffer based on next_to_use */
448 rx_desc = ICE_RX_DESC(rx_ring, ntu);
449 bi = &rx_ring->rx_buf[ntu];
452 if (!ice_alloc_mapped_page(rx_ring, bi))
455 /* Refresh the desc even if buffer_addrs didn't change
456 * because each write-back erases this info.
458 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
463 if (unlikely(ntu == rx_ring->count)) {
464 rx_desc = ICE_RX_DESC(rx_ring, 0);
465 bi = rx_ring->rx_buf;
469 /* clear the status bits for the next_to_use descriptor */
470 rx_desc->wb.status_error0 = 0;
472 cleaned_count--;
473 } while (cleaned_count);
475 if (rx_ring->next_to_use != ntu)
476 ice_release_rx_desc(rx_ring, ntu);
481 if (rx_ring->next_to_use != ntu)
482 ice_release_rx_desc(rx_ring, ntu);
484 /* make sure to come back via polling to try again after
485 * allocation failure
487 return true;
491 * ice_page_is_reserved - check if reuse is possible
492 * @page: page struct to check
494 static bool ice_page_is_reserved(struct page *page)
496 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
500 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
501 * @rx_buf: buffer containing page to add
502 * @rx_desc: descriptor containing length of buffer written by hardware
503 * @skb: sk_buf to place the data into
505 * This function will add the data contained in rx_buf->page to the skb.
506 * This is done either through a direct copy if the data in the buffer is
507 * less than the skb header size, otherwise it will just attach the page as
508 * a frag to the skb.
510 * The function will then update the page offset if necessary and return
511 * true if the buffer can be reused by the adapter.
513 static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
514 union ice_32b_rx_flex_desc *rx_desc,
517 #if (PAGE_SIZE < 8192)
518 unsigned int truesize = ICE_RXBUF_2048;
520 unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
521 unsigned int truesize;
522 #endif /* PAGE_SIZE < 8192 */
527 size = le16_to_cpu(rx_desc->wb.pkt_len) &
528 ICE_RX_FLX_DESC_PKT_LEN_M;
532 #if (PAGE_SIZE >= 8192)
533 truesize = ALIGN(size, L1_CACHE_BYTES);
534 #endif /* PAGE_SIZE >= 8192 */
536 /* will the data fit in the skb we allocated? if so, just
537 * copy it as it is pretty small anyway
539 if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
540 unsigned char *va = page_address(page) + rx_buf->page_offset;
542 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
544 /* page is not reserved, we can reuse buffer as-is */
545 if (likely(!ice_page_is_reserved(page)))
548 /* this page cannot be reused so discard it */
549 __free_pages(page, 0);
553 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
554 rx_buf->page_offset, size, truesize);
556 /* avoid re-using remote pages */
557 if (unlikely(ice_page_is_reserved(page)))
560 #if (PAGE_SIZE < 8192)
561 /* if we are only owner of page we can reuse it */
562 if (unlikely(page_count(page) != 1))
565 /* flip page offset to other buffer */
566 rx_buf->page_offset ^= truesize;
568 /* move offset up to the next cache line */
569 rx_buf->page_offset += truesize;
571 if (rx_buf->page_offset > last_offset)
573 #endif /* PAGE_SIZE < 8192 */
575 /* Even if we own the page, we are not allowed to use atomic_set()
576 * This would break get_page_unless_zero() users.
578 get_page(rx_buf->page);
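/* Editorial note (not part of the original source): with 4K pages and
 * ICE_RXBUF_2048 buffers, the XOR above simply flips page_offset between
 * the two halves of the page on each reuse:
 *
 *   first use: page_offset = 0     -> hardware writes bytes 0..2047
 *   reuse #1:  page_offset ^= 2048 -> hardware writes bytes 2048..4095
 *   reuse #2:  page_offset ^= 2048 -> back to bytes 0..2047
 *
 * The page keeps ping-ponging this way as long as the driver is the sole
 * owner (page_count(page) == 1) and the page is not "reserved".
 */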
584 * ice_reuse_rx_page - page flip buffer and store it back on the ring
585 * @rx_ring: Rx descriptor ring to store buffers on
586 * @old_buf: donor buffer to have page reused
588 * Synchronizes page for reuse by the adapter
590 static void ice_reuse_rx_page(struct ice_ring *rx_ring,
591 struct ice_rx_buf *old_buf)
593 u16 nta = rx_ring->next_to_alloc;
594 struct ice_rx_buf *new_buf;
596 new_buf = &rx_ring->rx_buf[nta];
598 /* update, and store next to alloc */
599 nta++;
600 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
602 /* transfer page from old buffer to new buffer */
607 * ice_fetch_rx_buf - Allocate skb and populate it
608 * @rx_ring: Rx descriptor ring to transact packets on
609 * @rx_desc: descriptor containing info written by hardware
611 * This function allocates an skb on the fly, and populates it with the page
612 * data from the current receive descriptor, taking care to set up the skb
613 * correctly, as well as handling calling the page recycle function if
616 static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
617 union ice_32b_rx_flex_desc *rx_desc)
619 struct ice_rx_buf *rx_buf;
623 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
630 u8 *page_addr = page_address(page) + rx_buf->page_offset;
632 /* prefetch first cache line of first page */
633 prefetch(page_addr);
634 #if L1_CACHE_BYTES < 128
635 prefetch((void *)(page_addr + L1_CACHE_BYTES));
636 #endif /* L1_CACHE_BYTES */
638 /* allocate a skb to store the frags */
639 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
641 GFP_ATOMIC | __GFP_NOWARN);
642 if (unlikely(!skb)) {
643 rx_ring->rx_stats.alloc_buf_failed++;
647 /* we will be copying header into skb->data in
648 * pskb_may_pull so it is in our interest to prefetch
649 * it now to avoid a possible cache miss
651 prefetchw(skb->data);
653 skb_record_rx_queue(skb, rx_ring->q_index);
655 /* we are reusing so sync this buffer for CPU use */
656 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
664 /* pull page into skb */
665 if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
666 /* hand second half of page back to the ring */
667 ice_reuse_rx_page(rx_ring, rx_buf);
668 rx_ring->rx_stats.page_reuse_count++;
670 /* we are not reusing the buffer so unmap it */
671 dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
675 /* clear contents of buffer_info */
682 * ice_pull_tail - ice specific version of skb_pull_tail
683 * @skb: pointer to current skb being adjusted
685 * This function is an ice specific version of __pskb_pull_tail. The
686 * main difference between this version and the original function is that
687 * this function can make several assumptions about the state of things
688 * that allow for significant optimizations versus the standard function.
689 * As a result we can do things like drop a frag and maintain an accurate
690 * truesize for the skb.
692 static void ice_pull_tail(struct sk_buff *skb)
694 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
695 unsigned int pull_len;
698 /* it is valid to use page_address instead of kmap since we are
699 * working with pages allocated out of the lomem pool per
700 * alloc_page(GFP_ATOMIC)
702 va = skb_frag_address(frag);
704 /* we need the header to contain the greater of either ETH_HLEN or
705 * 60 bytes if the skb->len is less than 60 for skb_pad.
707 pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
709 /* align pull length to size of long to optimize memcpy performance */
710 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
712 /* update all of the pointers */
713 skb_frag_size_sub(frag, pull_len);
714 frag->page_offset += pull_len;
715 skb->data_len -= pull_len;
716 skb->tail += pull_len;
720 * ice_cleanup_headers - Correct empty headers
721 * @skb: pointer to current skb being fixed
723 * Also address the case where we are pulling data in on pages only
724 * and as such no data is present in the skb header.
726 * In addition if skb is not at least 60 bytes we need to pad it so that
727 * it is large enough to qualify as a valid Ethernet frame.
729 * Returns true if an error was encountered and skb was freed.
731 static bool ice_cleanup_headers(struct sk_buff *skb)
733 /* place header in linear portion of buffer */
734 if (skb_is_nonlinear(skb))
737 /* if eth_skb_pad returns an error the skb was freed */
738 if (eth_skb_pad(skb))
745 * ice_test_staterr - tests bits in Rx descriptor status and error fields
746 * @rx_desc: pointer to receive descriptor (in le64 format)
747 * @stat_err_bits: value to mask
749 * This function does some fast chicanery in order to return the
750 * value of the mask which is really only used for boolean tests.
751 * The status_error_len doesn't need to be shifted because it begins
752 * at offset zero.
754 static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
755 const u16 stat_err_bits)
757 return !!(rx_desc->wb.status_error0 &
758 cpu_to_le16(stat_err_bits));
762 * ice_is_non_eop - process handling of non-EOP buffers
763 * @rx_ring: Rx ring being processed
764 * @rx_desc: Rx descriptor for current buffer
765 * @skb: Current socket buffer containing buffer in progress
767 * This function updates next to clean. If the buffer is an EOP buffer
768 * this function exits returning false, otherwise it will place the
769 * sk_buff in the next buffer to be chained and return true indicating
770 * that this is in fact a non-EOP buffer.
772 static bool ice_is_non_eop(struct ice_ring *rx_ring,
773 union ice_32b_rx_flex_desc *rx_desc,
776 u32 ntc = rx_ring->next_to_clean + 1;
778 /* fetch, update, and store next to clean */
779 ntc = (ntc < rx_ring->count) ? ntc : 0;
780 rx_ring->next_to_clean = ntc;
782 prefetch(ICE_RX_DESC(rx_ring, ntc));
784 /* if we are the last buffer then there is nothing else to do */
785 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
786 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
789 /* place skb in next buffer to be received */
790 rx_ring->rx_buf[ntc].skb = skb;
791 rx_ring->rx_stats.non_eop_descs++;
797 * ice_ptype_to_htype - get a hash type
798 * @ptype: the ptype value from the descriptor
800 * Returns a hash type to be used by skb_set_hash
802 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
804 return PKT_HASH_TYPE_NONE;
808 * ice_rx_hash - set the hash value in the skb
809 * @rx_ring: descriptor ring
810 * @rx_desc: specific descriptor
811 * @skb: pointer to current skb
812 * @rx_ptype: the ptype value from the descriptor
815 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
816 struct sk_buff *skb, u8 rx_ptype)
818 struct ice_32b_rx_flex_desc_nic *nic_mdid;
821 if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
824 if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
827 nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
828 hash = le32_to_cpu(nic_mdid->rss_hash);
829 skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
833 * ice_rx_csum - Indicate in skb if checksum is good
834 * @vsi: the VSI we care about
835 * @skb: skb currently being received and modified
836 * @rx_desc: the receive descriptor
837 * @ptype: the packet type decoded by hardware
839 * skb->protocol must be set before this function is called
841 static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
842 union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
844 struct ice_rx_ptype_decoded decoded;
845 u32 rx_error, rx_status;
848 rx_status = le16_to_cpu(rx_desc->wb.status_error0);
849 rx_error = rx_status;
851 decoded = ice_decode_rx_desc_ptype(ptype);
853 /* Start with CHECKSUM_NONE and by default csum_level = 0 */
854 skb->ip_summed = CHECKSUM_NONE;
855 skb_checksum_none_assert(skb);
857 /* check if Rx checksum is enabled */
858 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
861 /* check if HW has decoded the packet and checksum */
862 if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
865 if (!(decoded.known && decoded.outer_ip))
868 ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
869 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
870 ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
871 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
873 if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
874 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
876 else if (ipv6 && (rx_status &
877 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
880 /* check for L4 errors and handle packets that were not able to be
881 * checksummed due to arrival speed
883 if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
886 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
887 switch (decoded.inner_prot) {
888 case ICE_RX_PTYPE_INNER_PROT_TCP:
889 case ICE_RX_PTYPE_INNER_PROT_UDP:
890 case ICE_RX_PTYPE_INNER_PROT_SCTP:
891 skb->ip_summed = CHECKSUM_UNNECESSARY;
898 vsi->back->hw_csum_rx_error++;
902 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
903 * @rx_ring: Rx descriptor ring packet is being transacted on
904 * @rx_desc: pointer to the EOP Rx descriptor
905 * @skb: pointer to current skb being populated
906 * @ptype: the packet type decoded by hardware
908 * This function checks the ring, descriptor, and packet information in
909 * order to populate the hash, checksum, VLAN, protocol, and
910 * other fields within the skb.
912 static void ice_process_skb_fields(struct ice_ring *rx_ring,
913 union ice_32b_rx_flex_desc *rx_desc,
914 struct sk_buff *skb, u8 ptype)
916 ice_rx_hash(rx_ring, rx_desc, skb, ptype);
918 /* modifies the skb - consumes the enet header */
919 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
921 ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
925 * ice_receive_skb - Send a completed packet up the stack
926 * @rx_ring: Rx ring in play
927 * @skb: packet to send up
928 * @vlan_tag: vlan tag for packet
930 * This function sends the completed packet (via skb) up the stack using
931 * gro receive functions (with/without vlan tag)
933 static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
936 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
937 (vlan_tag & VLAN_VID_MASK)) {
938 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
940 napi_gro_receive(&rx_ring->q_vector->napi, skb);
944 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
945 * @rx_ring: Rx descriptor ring to transact packets on
946 * @budget: Total limit on number of packets to process
948 * This function provides a "bounce buffer" approach to Rx interrupt
949 * processing. The advantage to this is that on systems that have
950 * expensive overhead for IOMMU access this provides a means of avoiding
951 * it by maintaining the mapping of the page to the system.
953 * Returns amount of work completed
955 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
957 unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
958 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
959 bool failure = false;
961 /* start the loop to process RX packets bounded by 'budget' */
962 while (likely(total_rx_pkts < (unsigned int)budget)) {
963 union ice_32b_rx_flex_desc *rx_desc;
969 /* return some buffers to hardware, one at a time is too slow */
970 if (cleaned_count >= ICE_RX_BUF_WRITE) {
972 ice_alloc_rx_bufs(rx_ring, cleaned_count);
976 /* get the RX desc from RX ring based on 'next_to_clean' */
977 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
979 /* status_error_len will always be zero for unused descriptors
980 * because it's cleared in cleanup, and overlaps with hdr_addr
981 * which is always zero because packet split isn't used, if the
982 * hardware wrote DD then it will be non-zero
984 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
985 if (!ice_test_staterr(rx_desc, stat_err_bits))
988 /* This memory barrier is needed to keep us from reading
989 * any other fields out of the rx_desc until we know the
990 * DD bit is set.
992 dma_rmb();
994 /* allocate (if needed) and populate skb */
995 skb = ice_fetch_rx_buf(rx_ring, rx_desc);
1001 /* skip if it is NOP desc */
1002 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1005 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1006 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1007 dev_kfree_skb_any(skb);
1011 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1012 ICE_RX_FLEX_DESC_PTYPE_M;
1014 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1015 if (ice_test_staterr(rx_desc, stat_err_bits))
1016 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1018 /* correct empty headers and pad skb if needed (to make valid
1019 * ethernet frame
1021 if (ice_cleanup_headers(skb)) {
1026 /* probably a little skewed due to removing CRC */
1027 total_rx_bytes += skb->len;
1029 /* populate checksum, VLAN, and protocol */
1030 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1032 /* send completed skb up the stack */
1033 ice_receive_skb(rx_ring, skb, vlan_tag);
1035 /* update budget accounting */
1039 /* update queue and vector specific stats */
1040 u64_stats_update_begin(&rx_ring->syncp);
1041 rx_ring->stats.pkts += total_rx_pkts;
1042 rx_ring->stats.bytes += total_rx_bytes;
1043 u64_stats_update_end(&rx_ring->syncp);
1044 rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1045 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1047 /* guarantee a trip back through this routine if there was a failure */
1048 return failure ? budget : (int)total_rx_pkts;
1052 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1053 * @itr_idx: interrupt throttling index
1054 * @reg_itr: interrupt throttling value adjusted based on ITR granularity
1056 static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
1058 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1059 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1060 (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
1064 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1065 * @vsi: the VSI associated with the q_vector
1066 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1069 ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1071 struct ice_hw *hw = &vsi->back->hw;
1072 struct ice_ring_container *rc;
1075 /* This block of logic allows us to get away with only updating
1076 * one ITR value with each interrupt. The idea is to perform a
1077 * pseudo-lazy update with the following criteria.
1079 * 1. Rx is given higher priority than Tx if both are in same state
1080 * 2. If we must reduce an ITR, that is given highest priority.
1081 * 3. We then give priority to increasing ITR based on amount.
1083 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1084 rc = &q_vector->rx;
1085 /* Rx ITR needs to be reduced, this is highest priority */
1086 itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
1087 rc->current_itr = rc->target_itr;
1088 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1089 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1090 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1091 rc = &q_vector->tx;
1092 /* Tx ITR needs to be reduced, this is second priority
1093 * Tx ITR needs to be increased more than Rx, fourth priority
1095 itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
1096 rc->current_itr = rc->target_itr;
1097 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1098 rc = &q_vector->rx;
1099 /* Rx ITR needs to be increased, third priority */
1100 itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
1101 rc->current_itr = rc->target_itr;
1103 /* Still have to re-enable the interrupts */
1104 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1107 if (!test_bit(__ICE_DOWN, vsi->state)) {
1108 int vector = vsi->hw_base_vector + q_vector->v_idx;
1110 wr32(hw, GLINT_DYN_CTL(vector), itr_val);
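/* Editorial example (not part of the original source): suppose Rx has
 * target_itr = 50 and current_itr = 20 (Rx should be increased) while Tx
 * has target_itr = 8 and current_itr = 64 (Tx must be reduced).  The first
 * branch does not match (50 >= 20), the second does (8 < 64), so the Tx
 * reduction is programmed on this interrupt and the Rx increase waits for
 * a later pass -- matching the priority order described above.
 */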
1115 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1116 * @napi: napi struct with our devices info in it
1117 * @budget: amount of work driver is allowed to do this pass, in packets
1119 * This function will clean all queues associated with a q_vector.
1121 * Returns the amount of work done
1123 int ice_napi_poll(struct napi_struct *napi, int budget)
1125 struct ice_q_vector *q_vector =
1126 container_of(napi, struct ice_q_vector, napi);
1127 struct ice_vsi *vsi = q_vector->vsi;
1128 struct ice_pf *pf = vsi->back;
1129 bool clean_complete = true;
1130 int budget_per_ring = 0;
1131 struct ice_ring *ring;
1134 /* Since the actual Tx work is minimal, we can give the Tx a larger
1135 * budget and be more aggressive about cleaning up the Tx descriptors.
1137 ice_for_each_ring(ring, q_vector->tx)
1138 if (!ice_clean_tx_irq(vsi, ring, budget))
1139 clean_complete = false;
1141 /* Handle case where we are called by netpoll with a budget of 0 */
1145 /* We attempt to distribute budget to each Rx queue fairly, but don't
1146 * allow the budget to go below 1 because that would exit polling early.
1148 if (q_vector->num_ring_rx)
1149 budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1151 ice_for_each_ring(ring, q_vector->rx) {
1154 cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1155 work_done += cleaned;
1156 /* if we clean as many as budgeted, we must not be done */
1157 if (cleaned >= budget_per_ring)
1158 clean_complete = false;
1161 /* If work not completed, return budget and polling will return */
1162 if (!clean_complete)
1165 /* Exit the polling mode, but don't re-enable interrupts if stack might
1166 * poll us due to busy-polling
1168 if (likely(napi_complete_done(napi, work_done)))
1169 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1170 ice_update_ena_itr(vsi, q_vector);
1172 return min_t(int, work_done, budget - 1);
1175 /* helper function for building cmd/type/offset */
1177 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1179 return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1180 (td_cmd << ICE_TXD_QW1_CMD_S) |
1181 (td_offset << ICE_TXD_QW1_OFFSET_S) |
1182 ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1183 (td_tag << ICE_TXD_QW1_L2TAG1_S));
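/* Editorial example (not part of the original source): for a simple
 * 1514-byte frame with no checksum offload or VLAN tag, the final
 * descriptor would typically be built as
 *
 *   build_ctob(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS, 0, 1514, 0);
 *
 * i.e. ICE_TX_DESC_DTYPE_DATA in the low bits, EOP|RS in the command
 * field, a zero offset field, a 1514-byte buffer size, and no L2 tag.
 */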
1187 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1188 * @tx_ring: the ring to be checked
1189 * @size: the size buffer we want to assure is available
1191 * Returns -EBUSY if a stop is needed, else 0
1193 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1195 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1196 /* Memory barrier before checking head and tail */
1199 /* Check again in a case another CPU has just made room available. */
1200 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1203 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1204 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1205 ++tx_ring->tx_stats.restart_q;
1210 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1211 * @tx_ring: the ring to be checked
1212 * @size: the size buffer we want to assure is available
1214 * Returns 0 if stop is not needed
1216 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1218 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1221 return __ice_maybe_stop_tx(tx_ring, size);
1225 * ice_tx_map - Build the Tx descriptor
1226 * @tx_ring: ring to send buffer on
1227 * @first: first buffer info buffer to use
1228 * @off: pointer to struct that holds offload parameters
1230 * This function loops over the skb data pointed to by *first
1231 * and gets a physical address for each memory location and programs
1232 * it and the length into the transmit descriptor.
1235 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1236 struct ice_tx_offload_params *off)
1238 u64 td_offset, td_tag, td_cmd;
1239 u16 i = tx_ring->next_to_use;
1240 struct skb_frag_struct *frag;
1241 unsigned int data_len, size;
1242 struct ice_tx_desc *tx_desc;
1243 struct ice_tx_buf *tx_buf;
1244 struct sk_buff *skb;
1247 td_tag = off->td_l2tag1;
1248 td_cmd = off->td_cmd;
1249 td_offset = off->td_offset;
1252 data_len = skb->data_len;
1253 size = skb_headlen(skb);
1255 tx_desc = ICE_TX_DESC(tx_ring, i);
1257 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1258 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1259 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1260 ICE_TX_FLAGS_VLAN_S;
1263 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1267 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1268 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1270 if (dma_mapping_error(tx_ring->dev, dma))
1273 /* record length, and DMA address */
1274 dma_unmap_len_set(tx_buf, len, size);
1275 dma_unmap_addr_set(tx_buf, dma, dma);
1277 /* align size to end of page */
1278 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1279 tx_desc->buf_addr = cpu_to_le64(dma);
1281 /* account for data chunks larger than the hardware
1284 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1285 tx_desc->cmd_type_offset_bsz =
1286 build_ctob(td_cmd, td_offset, max_data, td_tag);
1291 if (i == tx_ring->count) {
1292 tx_desc = ICE_TX_DESC(tx_ring, 0);
1299 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1300 tx_desc->buf_addr = cpu_to_le64(dma);
1303 if (likely(!data_len))
1306 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1312 if (i == tx_ring->count) {
1313 tx_desc = ICE_TX_DESC(tx_ring, 0);
1317 size = skb_frag_size(frag);
1320 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1323 tx_buf = &tx_ring->tx_buf[i];
1326 /* record bytecount for BQL */
1327 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1329 /* record SW timestamp if HW timestamp is not available */
1330 skb_tx_timestamp(first->skb);
1333 if (i == tx_ring->count)
1336 /* write last descriptor with RS and EOP bits */
1337 td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1338 tx_desc->cmd_type_offset_bsz =
1339 build_ctob(td_cmd, td_offset, size, td_tag);
1341 /* Force memory writes to complete before letting h/w know there
1342 * are new descriptors to fetch.
1344 * We also use this memory barrier to make certain all of the
1345 * status bits have been updated before next_to_watch is written.
1349 /* set next_to_watch value indicating a packet is present */
1350 first->next_to_watch = tx_desc;
1352 tx_ring->next_to_use = i;
1354 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1356 /* notify HW of packet */
1357 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
1358 writel(i, tx_ring->tail);
1364 /* clear dma mappings for failed tx_buf map */
1366 tx_buf = &tx_ring->tx_buf[i];
1367 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1368 if (tx_buf == first)
1375 tx_ring->next_to_use = i;
1379 * ice_tx_csum - Enable Tx checksum offloads
1380 * @first: pointer to the first descriptor
1381 * @off: pointer to struct that holds offload parameters
1383 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1386 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1388 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1389 struct sk_buff *skb = first->skb;
1399 __be16 frag_off, protocol;
1400 unsigned char *exthdr;
1401 u32 offset, cmd = 0;
1404 if (skb->ip_summed != CHECKSUM_PARTIAL)
1407 ip.hdr = skb_network_header(skb);
1408 l4.hdr = skb_transport_header(skb);
1410 /* compute outer L2 header size */
1411 l2_len = ip.hdr - skb->data;
1412 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1414 if (skb->encapsulation)
1417 /* Enable IP checksum offloads */
1418 protocol = vlan_get_protocol(skb);
1419 if (protocol == htons(ETH_P_IP)) {
1420 l4_proto = ip.v4->protocol;
1421 /* the stack computes the IP header already, the only time we
1422 * need the hardware to recompute it is in the case of TSO.
1424 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1425 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1427 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1429 } else if (protocol == htons(ETH_P_IPV6)) {
1430 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1431 exthdr = ip.hdr + sizeof(*ip.v6);
1432 l4_proto = ip.v6->nexthdr;
1433 if (l4.hdr != exthdr)
1434 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1440 /* compute inner L3 header size */
1441 l3_len = l4.hdr - ip.hdr;
1442 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1444 /* Enable L4 checksum offloads */
1447 /* enable checksum offloads */
1448 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1449 l4_len = l4.tcp->doff;
1450 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1453 /* enable UDP checksum offload */
1454 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1455 l4_len = (sizeof(struct udphdr) >> 2);
1456 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1459 /* enable SCTP checksum offload */
1460 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1461 l4_len = sizeof(struct sctphdr) >> 2;
1462 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1466 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1468 skb_checksum_help(skb);
1473 off->td_offset |= offset;
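/* Editorial example (not part of the original source): the offset field is
 * expressed in hardware units -- MACLEN in 2-byte words, IPLEN and L4LEN in
 * 4-byte words.  For a plain TCP/IPv4 frame with a 14-byte Ethernet header,
 * a 20-byte IP header and a 20-byte TCP header (doff == 5):
 *
 *   MACLEN = 14 / 2 = 7,  IPLEN = 20 / 4 = 5,  L4LEN = doff = 5
 *
 * which is what the l2_len / 2, l3_len / 4 and l4.tcp->doff computations
 * above produce before shifting into the ICE_TX_DESC_LEN_*_S positions.
 */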
1478 * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1479 * @tx_ring: ring to send buffer on
1480 * @first: pointer to struct ice_tx_buf
1482 * Checks the skb and set up correspondingly several generic transmit flags
1483 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1485 * Returns an error code to indicate the frame should be dropped upon error,
1486 * otherwise returns 0 to indicate the flags have been set properly.
1489 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1491 struct sk_buff *skb = first->skb;
1492 __be16 protocol = skb->protocol;
1494 if (protocol == htons(ETH_P_8021Q) &&
1495 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1496 /* when HW VLAN acceleration is turned off by the user the
1497 * stack sets the protocol to 8021q so that the driver
1498 * can take any steps required to support the SW only
1499 * VLAN handling. In our case the driver doesn't need
1500 * to take any further steps so just set the protocol
1501 * to the encapsulated ethertype.
1503 skb->protocol = vlan_get_protocol(skb);
1507 /* if we have a HW VLAN tag being added, default to the HW one */
1508 if (skb_vlan_tag_present(skb)) {
1509 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1510 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1511 } else if (protocol == htons(ETH_P_8021Q)) {
1512 struct vlan_hdr *vhdr, _vhdr;
1514 /* for SW VLAN, check the next protocol and store the tag */
1515 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1521 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1522 ICE_TX_FLAGS_VLAN_S;
1523 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1531 * ice_tso - computes mss and TSO length to prepare for TSO
1532 * @first: pointer to struct ice_tx_buf
1533 * @off: pointer to struct that holds offload parameters
1535 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1538 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1540 struct sk_buff *skb = first->skb;
1550 u64 cd_mss, cd_tso_len;
1551 u32 paylen, l4_start;
1554 if (skb->ip_summed != CHECKSUM_PARTIAL)
1557 if (!skb_is_gso(skb))
1560 err = skb_cow_head(skb, 0);
1564 ip.hdr = skb_network_header(skb);
1565 l4.hdr = skb_transport_header(skb);
1567 /* initialize outer IP header fields */
1568 if (ip.v4->version == 4) {
1572 ip.v6->payload_len = 0;
1575 /* determine offset of transport header */
1576 l4_start = l4.hdr - skb->data;
1578 /* remove payload length from checksum */
1579 paylen = skb->len - l4_start;
1580 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1582 /* compute length of segmentation header */
1583 off->header_len = (l4.tcp->doff * 4) + l4_start;
1585 /* update gso_segs and bytecount */
1586 first->gso_segs = skb_shinfo(skb)->gso_segs;
1587 first->bytecount += (first->gso_segs - 1) * off->header_len;
1589 cd_tso_len = skb->len - off->header_len;
1590 cd_mss = skb_shinfo(skb)->gso_size;
1592 /* record cdesc_qw1 with TSO parameters */
1593 off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
1594 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1595 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1596 (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
1597 first->tx_flags |= ICE_TX_FLAGS_TSO;
1602 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1603 * @size: transmit request size in bytes
1605 * Due to hardware alignment restrictions (4K alignment), we need to
1606 * assume that we can have no more than 12K of data per descriptor, even
1607 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1608 * Thus, we need to divide by 12K. But division is slow! Instead,
1609 * we decompose the operation into shifts and one relatively cheap
1610 * multiply operation.
1612 * To divide by 12K, we first divide by 4K, then divide by 3:
1613 * To divide by 4K, shift right by 12 bits
1614 * To divide by 3, multiply by 85, then divide by 256
1615 * (Divide by 256 is done by shifting right by 8 bits)
1616 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1617 * 3, we'll underestimate near each multiple of 12K. This is actually more
1618 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1619 * segment. For our purposes this is accurate out to 1M which is orders of
1620 * magnitude greater than our largest possible GSO size.
1622 * This would then be implemented as:
1623 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1625 * Since multiplication and division are commutative, we can reorder
1627 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1629 static unsigned int ice_txd_use_count(unsigned int size)
1631 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
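/* Editorial example (not part of the original source), assuming
 * ICE_DESCS_FOR_SKB_DATA_PTR is 1 as in this driver:
 *
 *   size = 65536: (65536 * 85) >> 20 = 5, + 1 -> 6 descriptors,
 *                 matching ceil(65536 / 12K) = 6
 *   size = 12289: (12289 * 85) >> 20 = 0, + 1 -> 1 descriptor, one short of
 *                 ceil(12289 / 12K) = 2 -- the underestimate near multiples
 *                 of 12K noted above, absorbed by the 16K - 1 hardware limit
 */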
1635 * ice_xmit_desc_count - calculate number of Tx descriptors needed
1638 * Returns number of data descriptors needed for this skb.
1640 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1642 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1643 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1644 unsigned int count = 0, size = skb_headlen(skb);
1647 count += ice_txd_use_count(size);
1652 size = skb_frag_size(frag++);
1659 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
1662 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
1663 * and so we need to figure out the cases where we need to linearize the skb.
1665 * For TSO we need to count the TSO header and segment payload separately.
1666 * As such we need to check cases where we have 7 fragments or more as we
1667 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1668 * the segment payload in the first descriptor, and another 7 for the
1669 * fragments.
1671 static bool __ice_chk_linearize(struct sk_buff *skb)
1673 const struct skb_frag_struct *frag, *stale;
1676 /* no need to check if number of frags is less than 7 */
1677 nr_frags = skb_shinfo(skb)->nr_frags;
1678 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
1681 /* We need to walk through the list and validate that each group
1682 * of 6 fragments totals at least gso_size.
1684 nr_frags -= ICE_MAX_BUF_TXD - 2;
1685 frag = &skb_shinfo(skb)->frags[0];
1687 /* Initialize size to the negative value of gso_size minus 1. We
1688 * use this as the worst case scenario in which the frag ahead
1689 * of us only provides one byte which is why we are limited to 6
1690 * descriptors for a single transmit as the header and previous
1691 * fragment are already consuming 2 descriptors.
1693 sum = 1 - skb_shinfo(skb)->gso_size;
1695 /* Add size of frags 0 through 4 to create our initial sum */
1696 sum += skb_frag_size(frag++);
1697 sum += skb_frag_size(frag++);
1698 sum += skb_frag_size(frag++);
1699 sum += skb_frag_size(frag++);
1700 sum += skb_frag_size(frag++);
1702 /* Walk through fragments adding latest fragment, testing it, and
1703 * then removing stale fragments from the sum.
1705 stale = &skb_shinfo(skb)->frags[0];
1707 sum += skb_frag_size(frag++);
1709 /* if sum is negative we failed to make sufficient progress */
1716 sum -= skb_frag_size(stale++);
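/* Editorial example (not part of the original source): consider a TSO skb
 * with gso_size = 4000 and ten 512-byte fragments.  Each window of
 * ICE_MAX_BUF_TXD - 2 = 6 consecutive fragments covers only
 * 6 * 512 = 3072 bytes < 4000, so sum goes negative on the first test and
 * the function returns true -- the skb must be linearized.  With 1024-byte
 * fragments each window covers 6144 bytes >= 4000 and the check passes.
 */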
1723 * ice_chk_linearize - Check if there are more than 8 fragments per packet
1725 * @count: number of buffers used
1727 * Note: Our HW can't scatter-gather more than 8 fragments to build
1728 * a packet on the wire and so we need to figure out the cases where we
1729 * need to linearize the skb.
1731 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
1733 /* Both TSO and single send will work if count is less than 8 */
1734 if (likely(count < ICE_MAX_BUF_TXD))
1737 if (skb_is_gso(skb))
1738 return __ice_chk_linearize(skb);
1740 /* we can support up to 8 data buffers for a single send */
1741 return count != ICE_MAX_BUF_TXD;
1745 * ice_xmit_frame_ring - Sends buffer on Tx ring
1747 * @tx_ring: ring to send buffer on
1749 * Returns NETDEV_TX_OK if sent, else an error code
1752 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1754 struct ice_tx_offload_params offload = { 0 };
1755 struct ice_tx_buf *first;
1759 count = ice_xmit_desc_count(skb);
1760 if (ice_chk_linearize(skb, count)) {
1761 if (__skb_linearize(skb))
1763 count = ice_txd_use_count(skb->len);
1764 tx_ring->tx_stats.tx_linearize++;
1767 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
1768 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
1769 * + 4 desc gap to avoid the cache line where head is,
1770 * + 1 desc for context descriptor,
1771 * otherwise try next time
1773 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
1774 ICE_DESCS_FOR_CTX_DESC)) {
1775 tx_ring->tx_stats.tx_busy++;
1776 return NETDEV_TX_BUSY;
1779 offload.tx_ring = tx_ring;
1781 /* record the location of the first descriptor for this packet */
1782 first = &tx_ring->tx_buf[tx_ring->next_to_use];
1784 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
1785 first->gso_segs = 1;
1786 first->tx_flags = 0;
1788 /* prepare the VLAN tagging flags for Tx */
1789 if (ice_tx_prepare_vlan_flags(tx_ring, first))
1792 /* set up TSO offload */
1793 tso = ice_tso(first, &offload);
1797 /* always set up Tx checksum offload */
1798 csum = ice_tx_csum(first, &offload);
1802 if (tso || offload.cd_tunnel_params) {
1803 struct ice_tx_ctx_desc *cdesc;
1804 int i = tx_ring->next_to_use;
1806 /* grab the next descriptor */
1807 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
1808 i++;
1809 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1811 /* setup context descriptor */
1812 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
1813 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
1814 cdesc->rsvd = cpu_to_le16(0);
1815 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
1818 ice_tx_map(tx_ring, first, &offload);
1819 return NETDEV_TX_OK;
1822 dev_kfree_skb_any(skb);
1823 return NETDEV_TX_OK;
1827 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
1829 * @netdev: network interface device structure
1831 * Returns NETDEV_TX_OK if sent, else an error code
1833 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1835 struct ice_netdev_priv *np = netdev_priv(netdev);
1836 struct ice_vsi *vsi = np->vsi;
1837 struct ice_ring *tx_ring;
1839 tx_ring = vsi->tx_rings[skb->queue_mapping];
1841 /* hardware can't handle really short frames, hardware padding works
1842 * beyond this point
1844 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
1845 return NETDEV_TX_OK;
1847 return ice_xmit_frame_ring(skb, tx_ring);