// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"

#define ICE_RX_HDR_SIZE 256
/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool
ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
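	/* Note: i is kept biased negative (hence the s16 type): it runs from
	 * -count up toward zero, so the wrap check in the loop below can be
	 * a cheap "if (!i)" instead of a compare against tx_ring->count.
	 */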
	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}
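/* A note on TX_WAKE_THRESHOLD above: restarting the queue only once at
 * least twice the worst-case descriptor need for one frame (DESC_NEEDED)
 * is free gives the stop/wake path hysteresis, so a queue stopped under
 * pressure does not immediately stop again on the very next transmit.
 */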
/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}
/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048, DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}
/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
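/* A note on the page_ref_add(page, USHRT_MAX - 1) / pagecnt_bias pairing
 * above: it batches page refcounting. The driver takes a large refcount
 * bias once at allocation and then tracks its own references in the plain
 * u16 pagecnt_bias, so the hot path can take and return buffers without an
 * atomic page_ref operation per frame. The bias is re-armed in
 * ice_can_reuse_rx_page() once it drains down to 1.
 */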
/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the RX descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		if (!ice_alloc_mapped_page(rx_ring, bi))
			goto no_bufs;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ICE_RXBUF_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return false;

no_bufs:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by @size bytes.
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}
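/* Worked example for the PAGE_SIZE < 8192 branch above: with 4K pages and
 * ICE_RXBUF_2048 buffers, two buffers share each page and page_offset
 * simply toggles between the two halves:
 *	page_offset = 0;	page_offset ^= 2048;	-> 2048 (second half)
 *	page_offset = 2048;	page_offset ^= 2048;	-> 0    (first half)
 * so the XOR flips halves without a branch.
 */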
/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and the
 * page freed.
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
#if (PAGE_SIZE >= 8192)
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
#endif
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
		unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size);
#else
	unsigned int truesize = ICE_RXBUF_2048;
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}
/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}
/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @size: the length of the packet
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  unsigned int size)
{
	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ICE_RXBUF_2048;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}
/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	/* hand second half of page back to the ring */
	if (ice_can_reuse_rx_page(rx_buf)) {
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}
/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool
ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}
/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}
/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	default:
		break;
	}
	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
}
/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag)
 */
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure = false;

	/* start the loop to process RX packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= ICE_RX_BUF_WRITE) {
			failure = failure ||
				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* get the RX desc from RX ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
		/* allocate (if needed) and populate skb */
		if (skb)
			ice_add_rx_frag(rx_buf, skb, size);
		else
			skb = ice_construct_skb(rx_ring, rx_buf, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make a
		 * valid Ethernet frame)
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* update queue and vector specific stats */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.pkts += total_rx_pkts;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}
static unsigned int ice_itr_divisor(struct ice_port_info *pi)
{
	switch (pi->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
	case ICE_AQ_LINK_SPEED_25GB:
	case ICE_AQ_LINK_SPEED_20GB:
		return ICE_ITR_ADAPTIVE_MIN_INC * 512;
	case ICE_AQ_LINK_SPEED_100MB:
		return ICE_ITR_ADAPTIVE_MIN_INC * 32;
	default:
		return ICE_ITR_ADAPTIVE_MIN_INC * 256;
	}
}
/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}
	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;
adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}
	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K and 16K ints/sec
	 */
	if (itr & ICE_ITR_ADAPTIVE_LATENCY)
		avg_wire_size >>= 1;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size,
			    ice_itr_divisor(q_vector->vsi->port_info)) *
	       ICE_ITR_ADAPTIVE_MIN_INC;
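	/* Hedged worked example (assuming ICE_ITR_ADAPTIVE_MIN_INC is 2): the
	 * default divisor above is 2 * 256 = 512, so a 60-byte average frame
	 * (avg_wire_size 4096) adds DIV_ROUND_UP(4096, 512) * 2 = 16 usecs;
	 * on a 40GB link the divisor is 2 * 1024 = 2048, adding 4 usecs,
	 * i.e. ~250K ints/sec, matching the "Start at 250k ints/sec" branch.
	 */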
	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}
/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
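/* The tail shift above works because, for the even values ICE_ITR_MASK
 * allows, itr << (INTERVAL_S - GRAN_S) == (itr >> GRAN_S) << INTERVAL_S:
 * it converts microseconds into the 2-usec register units described above
 * and places the result in the INTERVAL field in one operation. An ITR of
 * 50 usecs, for example, is written as 25 interval units.
 */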
/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3
/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @vsi: the VSI associated with the q_vector
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	u32 itr_val;

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR, that is given highest priority
	 * 3. We then give priority to increasing ITR based on amount
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, vsi->state))
		wr32(&vsi->back->hw,
		     GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),
		     itr_val);
}
/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	struct ice_vsi *vsi = q_vector->vsi;
	struct ice_pf *pf = vsi->back;
	bool clean_complete = true;
	int budget_per_ring = 0;
	struct ice_ring *ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx)
		if (!ice_clean_tx_irq(vsi, ring, budget))
			clean_complete = false;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	if (q_vector->num_ring_rx)
		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
			ice_update_ena_itr(vsi, q_vector);

	return min_t(int, work_done, budget - 1);
}
/* helper function for building cmd/type/offset */
static __le64
build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}
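/* build_ctob() packs the second quadword of a Tx data descriptor. For
 * example, the final descriptor of each packet in ice_tx_map() below is
 * built with td_cmd | ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS, marking
 * end-of-packet and requesting the writeback that ice_clean_tx_irq()
 * polls for via ICE_TX_DESC_DTYPE_DESC_DONE.
 */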
/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	struct skb_frag_struct *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
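		/* Hedged example (assuming ICE_MAX_READ_REQ_SIZE is 4096 and
		 * ICE_MAX_DATA_PER_TXD_ALIGNED is 12K, as the names suggest):
		 * for dma = 0x10000100, -dma & 4095 = 0xF00 (3840), so
		 * max_data becomes 12288 + 3840 = 16128 and the first chunk
		 * ends exactly on the 4K boundary 0x10004000 while staying
		 * under the 16K - 1 per-descriptor limit.
		 */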
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;

dma_error:
	/* clear dma mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
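	/* The MACLEN field appears to be expressed in two-byte words, hence
	 * the division by 2 above: a plain 14-byte Ethernet header yields 7,
	 * a VLAN-tagged 18-byte header yields 9. The IPLEN field below is
	 * likewise filled in four-byte words (l3_len / 4).
	 */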
	if (skb->encapsulation)
		return -1;

	/* Enable IP checksum offloads */
	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IP)) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (protocol == htons(ETH_P_IPV6)) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}
/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code indicating that the frame should be dropped on
 * error, otherwise returns 0 to indicate that the flags have been set
 * properly.
 */
static int
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* when HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		/* for SW VLAN, check the next protocol and store the tag */
		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
							     sizeof(_vhdr),
							     &_vhdr);
		if (!vhdr)
			return -EINVAL;

		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				   ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
	}

out:
	return 0;
}
/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen, l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of transport header */
	l4_start = l4.hdr - skb->data;

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header */
	off->header_len = (l4.tcp->doff * 4) + l4_start;

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;
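	/* Worked example: for a TSO skb with gso_segs = 10 and a 54-byte
	 * header (14 Ethernet + 20 IP + 20 TCP), the wire carries the header
	 * ten times but skb->len counts it once, so bytecount grows by
	 * (10 - 1) * 54 = 486 bytes, keeping the BQL accounting in
	 * ice_tx_map()/ice_clean_tx_irq() in post-segmentation bytes.
	 */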
	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
		       (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
		       (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
		       (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *	To divide by 4K, shift right by 12 bits
 *	To divide by 3, multiply by 85, then divide by 256
 *	(Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *	return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
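/* A quick sanity check of the math above (assuming
 * ICE_DESCS_FOR_SKB_DATA_PTR is 1): for size = 24576 (exactly 2 * 12K),
 * (24576 * 85) >> 20 = 1, plus one gives 2, matching two 12K chunks. For
 * size = 1500, (1500 * 85) >> 20 = 0, giving a single descriptor, as
 * expected for a standard MTU frame.
 */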
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}
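/* The loop above is a sliding window over the frag list: at each step the
 * window covers ICE_MAX_BUF_TXD - 2 consecutive frags, and if their
 * combined size ever fails to reach gso_size (sum goes negative), some
 * segment would need more descriptors than the hardware allows and the
 * skb must be linearized. For example, gso_size = 9000 with six 1400-byte
 * frags in the window sums to 8400 and forces linearization.
 */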
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	if (ice_tx_prepare_vlan_flags(tx_ring, first))
		goto out_drop;

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	if (tso || offload.cd_tunnel_params) {
		struct ice_tx_ctx_desc *cdesc;
		int i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}