1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/prefetch.h>
7 #include "iavf_trace.h"
8 #include "iavf_prototype.h"
10 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
13 return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
14 ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
15 ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
16 ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
17 ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
20 #define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
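/* Illustrative note (not from the original source): assuming the usual
 * quad word layout for this hardware family (DTYPE in the low bits, the
 * command field at shift 4, the header-offset field at shift 16, the
 * buffer size at shift 34 and L2TAG1 at shift 48), a call such as
 * build_ctob(IAVF_TXD_CMD, td_offset, 1514, 0) yields a descriptor qword
 * with EOP and RS set in the command field, the MAC/IP/L4 length offsets
 * packed at bit 16 and the 1514 byte buffer length at bit 34.  The
 * authoritative positions are the IAVF_TXD_QW1_*_SHIFT defines used
 * above, so treat the concrete bit numbers here as an example only.
 */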
23 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
24 * @ring: the ring that owns the buffer
25 * @tx_buffer: the buffer to free
27 static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
28 struct iavf_tx_buffer *tx_buffer)
31 if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
32 kfree(tx_buffer->raw_buf);
34 dev_kfree_skb_any(tx_buffer->skb);
35 if (dma_unmap_len(tx_buffer, len))
36 dma_unmap_single(ring->dev,
37 dma_unmap_addr(tx_buffer, dma),
38 dma_unmap_len(tx_buffer, len),
40 } else if (dma_unmap_len(tx_buffer, len)) {
41 dma_unmap_page(ring->dev,
42 dma_unmap_addr(tx_buffer, dma),
43 dma_unmap_len(tx_buffer, len),
47 tx_buffer->next_to_watch = NULL;
48 tx_buffer->skb = NULL;
49 dma_unmap_len_set(tx_buffer, len, 0);
50 /* tx_buffer must be completely set up in the transmit path */
54 * iavf_clean_tx_ring - Free all Tx buffers
55 * @tx_ring: ring to be cleaned
57 void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
59 unsigned long bi_size;
62 /* ring already cleared, nothing to do */
66 /* Free all the Tx ring sk_buffs */
67 for (i = 0; i < tx_ring->count; i++)
68 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
70 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
71 memset(tx_ring->tx_bi, 0, bi_size);
73 /* Zero out the descriptor ring */
74 memset(tx_ring->desc, 0, tx_ring->size);
76 tx_ring->next_to_use = 0;
77 tx_ring->next_to_clean = 0;
82 /* cleanup Tx queue statistics */
83 netdev_tx_reset_queue(txring_txq(tx_ring));
87 * iavf_free_tx_resources - Free Tx resources per queue
88 * @tx_ring: Tx descriptor ring for a specific queue
90 * Free all transmit software resources
92 void iavf_free_tx_resources(struct iavf_ring *tx_ring)
94 iavf_clean_tx_ring(tx_ring);
95 kfree(tx_ring->tx_bi);
96 tx_ring->tx_bi = NULL;
99 dma_free_coherent(tx_ring->dev, tx_ring->size,
100 tx_ring->desc, tx_ring->dma);
101 tx_ring->desc = NULL;
106 * iavf_get_tx_pending - how many Tx descriptors not processed
107 * @ring: the ring of descriptors
108 * @in_sw: is tx_pending being checked in SW or HW
110 * Since there is no access to the ring head register
111 * in XL710, we need to use our local copies
113 u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
117 head = ring->next_to_clean;
118 tail = readl(ring->tail);
121 return (head < tail) ?
122 tail - head : (tail + ring->count - head);
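/* Worked example (illustrative): on a 512 descriptor ring with
 * next_to_clean (head) at 500 and tail at 10, the tail has wrapped, so
 * the second branch applies and 10 + 512 - 500 = 22 descriptors are
 * still pending.  Without a wrap (head 100, tail 150) the plain
 * difference, 50, is returned.
 */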
128 * iavf_detect_recover_hung - Function to detect and recover hung queues
129 * @vsi: pointer to vsi struct with tx queues
131 * VSI has netdev and netdev has TX queues. This function checks each of
132 * those TX queues and, if one appears hung, triggers recovery by issuing a SW interrupt.
134 void iavf_detect_recover_hung(struct iavf_vsi *vsi)
136 struct iavf_ring *tx_ring = NULL;
137 struct net_device *netdev;
144 if (test_bit(__IAVF_VSI_DOWN, vsi->state))
147 netdev = vsi->netdev;
151 if (!netif_carrier_ok(netdev))
154 for (i = 0; i < vsi->back->num_active_queues; i++) {
155 tx_ring = &vsi->back->tx_rings[i];
156 if (tx_ring && tx_ring->desc) {
157 /* If packet counter has not changed the queue is
158 * likely stalled, so force an interrupt for this
161 * prev_pkt_ctr would be negative if there was no
164 packets = tx_ring->stats.packets & INT_MAX;
165 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
166 iavf_force_wb(vsi, tx_ring->q_vector);
170 /* Memory barrier between read of packet count and call
171 * to iavf_get_tx_pending()
174 tx_ring->tx_stats.prev_pkt_ctr =
175 iavf_get_tx_pending(tx_ring, true) ? packets : -1;
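/* Illustrative timeline (not from the original source): if a pass of
 * this loop reads packets == 1000 while descriptors are still pending,
 * prev_pkt_ctr is recorded as 1000.  Should the next pass still read
 * 1000 with work outstanding, the counter has not advanced and the SW
 * interrupt above is fired to kick the queue.  When nothing is pending,
 * prev_pkt_ctr is parked at -1 so an idle queue is never mistaken for a
 * hung one.
 */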
183 * iavf_clean_tx_irq - Reclaim resources after transmit completes
184 * @vsi: the VSI we care about
185 * @tx_ring: Tx ring to clean
186 * @napi_budget: Used to determine if we are in netpoll
188 * Returns true if there's any budget left (i.e. the clean is finished)
190 static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
191 struct iavf_ring *tx_ring, int napi_budget)
193 int i = tx_ring->next_to_clean;
194 struct iavf_tx_buffer *tx_buf;
195 struct iavf_tx_desc *tx_desc;
196 unsigned int total_bytes = 0, total_packets = 0;
197 unsigned int budget = vsi->work_limit;
199 tx_buf = &tx_ring->tx_bi[i];
200 tx_desc = IAVF_TX_DESC(tx_ring, i);
204 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
206 /* if next_to_watch is not set then there is no work pending */
210 /* prevent any other reads prior to eop_desc */
213 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
214 /* if the descriptor isn't done, no work yet to do */
215 if (!(eop_desc->cmd_type_offset_bsz &
216 cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
219 /* clear next_to_watch to prevent false hangs */
220 tx_buf->next_to_watch = NULL;
222 /* update the statistics for this packet */
223 total_bytes += tx_buf->bytecount;
224 total_packets += tx_buf->gso_segs;
227 napi_consume_skb(tx_buf->skb, napi_budget);
229 /* unmap skb header data */
230 dma_unmap_single(tx_ring->dev,
231 dma_unmap_addr(tx_buf, dma),
232 dma_unmap_len(tx_buf, len),
235 /* clear tx_buffer data */
237 dma_unmap_len_set(tx_buf, len, 0);
239 /* unmap remaining buffers */
240 while (tx_desc != eop_desc) {
241 iavf_trace(clean_tx_irq_unmap,
242 tx_ring, tx_desc, tx_buf);
249 tx_buf = tx_ring->tx_bi;
250 tx_desc = IAVF_TX_DESC(tx_ring, 0);
253 /* unmap any remaining paged data */
254 if (dma_unmap_len(tx_buf, len)) {
255 dma_unmap_page(tx_ring->dev,
256 dma_unmap_addr(tx_buf, dma),
257 dma_unmap_len(tx_buf, len),
259 dma_unmap_len_set(tx_buf, len, 0);
263 /* move us one more past the eop_desc for start of next pkt */
269 tx_buf = tx_ring->tx_bi;
270 tx_desc = IAVF_TX_DESC(tx_ring, 0);
275 /* update budget accounting */
277 } while (likely(budget));
280 tx_ring->next_to_clean = i;
281 u64_stats_update_begin(&tx_ring->syncp);
282 tx_ring->stats.bytes += total_bytes;
283 tx_ring->stats.packets += total_packets;
284 u64_stats_update_end(&tx_ring->syncp);
285 tx_ring->q_vector->tx.total_bytes += total_bytes;
286 tx_ring->q_vector->tx.total_packets += total_packets;
288 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
289 /* check to see if there are < 4 descriptors
290 * waiting to be written back, then kick the hardware to force
291 * them to be written back in case we stay in NAPI.
292 * In this mode on X722 we do not enable Interrupt.
294 unsigned int j = iavf_get_tx_pending(tx_ring, false);
297 ((j / WB_STRIDE) == 0) && (j > 0) &&
298 !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
299 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
300 tx_ring->arm_wb = true;
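/* Illustrative note: the ((j / WB_STRIDE) == 0) && (j > 0) test above is
 * an integer-division way of writing 0 < j < WB_STRIDE, i.e. only a
 * handful of descriptors (fewer than the stride, a small constant on the
 * order of 4 in this driver family) are still awaiting writeback, so it
 * is worth arming a forced writeback instead of waiting for more work to
 * accumulate.
 */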
303 /* notify netdev of completed buffers */
304 netdev_tx_completed_queue(txring_txq(tx_ring),
305 total_packets, total_bytes);
307 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
309 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310 /* Make sure that anybody stopping the queue after this
311 * sees the new next_to_clean.
314 if (__netif_subqueue_stopped(tx_ring->netdev,
315 tx_ring->queue_index) &&
316 !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
318 tx_ring->queue_index);
319 ++tx_ring->tx_stats.restart_queue;
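/* Illustrative note: TX_WAKE_THRESHOLD is twice DESC_NEEDED, the number
 * of free descriptors the transmit path requires before queueing another
 * frame (roughly MAX_SKB_FRAGS plus a small reserve in this driver
 * family).  Waking the queue only once double that headroom is free adds
 * hysteresis, so a stopped queue does not bounce between stopped and
 * awake on every handful of cleaned descriptors.
 */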
327 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
328 * @vsi: the VSI we care about
329 * @q_vector: the vector on which to enable writeback
332 static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
333 struct iavf_q_vector *q_vector)
335 u16 flags = q_vector->tx.ring[0].flags;
338 if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
341 if (q_vector->arm_wb_state)
344 val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
345 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
348 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
349 q_vector->arm_wb_state = true;
353 * iavf_force_wb - Issue SW Interrupt so HW does a wb
354 * @vsi: the VSI we care about
355 * @q_vector: the vector on which to force writeback
358 void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
360 u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
361 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
362 IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
363 IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
364 /* allow 00 to be written to the index */;
367 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
371 static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
372 struct iavf_ring_container *rc)
374 return &q_vector->rx == rc;
377 static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
379 unsigned int divisor;
381 switch (q_vector->adapter->link_speed) {
382 case IAVF_LINK_SPEED_40GB:
383 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
385 case IAVF_LINK_SPEED_25GB:
386 case IAVF_LINK_SPEED_20GB:
387 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
390 case IAVF_LINK_SPEED_10GB:
391 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
393 case IAVF_LINK_SPEED_1GB:
394 case IAVF_LINK_SPEED_100MB:
395 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
403 * iavf_update_itr - update the dynamic ITR value based on statistics
404 * @q_vector: structure containing interrupt and ring information
405 * @rc: structure containing ring performance data
407 * Stores a new ITR value based on packets and byte
408 * counts during the last interrupt. The advantage of per interrupt
409 * computation is faster updates and more accurate ITR for the current
410 * traffic pattern. Constants in this function were computed
411 * based on theoretical maximum wire speed and thresholds were set based
412 * on testing data as well as attempting to minimize response time
413 * while increasing bulk throughput.
415 static void iavf_update_itr(struct iavf_q_vector *q_vector,
416 struct iavf_ring_container *rc)
418 unsigned int avg_wire_size, packets, bytes, itr;
419 unsigned long next_update = jiffies;
421 /* If we don't have any rings just leave ourselves set for maximum
422 * possible latency so we take ourselves out of the equation.
424 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
427 /* For Rx we want to push the delay up and default to low latency.
428 * for Tx we want to pull the delay down and default to high latency.
430 itr = iavf_container_is_rx(q_vector, rc) ?
431 IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
432 IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
434 /* If we didn't update within up to 1 - 2 jiffies we can assume
435 * that either packets are coming in so slow there hasn't been
436 * any work, or that there is so much work that NAPI is dealing
437 * with interrupt moderation and we don't need to do anything.
439 if (time_after(next_update, rc->next_update))
442 /* If itr_countdown is set it means we programmed an ITR within
443 * the last 4 interrupt cycles. This has a side effect of us
444 * potentially firing an early interrupt. In order to work around
445 * this we need to throw out any data received for a few
446 * interrupts following the update.
448 if (q_vector->itr_countdown) {
449 itr = rc->target_itr;
453 packets = rc->total_packets;
454 bytes = rc->total_bytes;
456 if (iavf_container_is_rx(q_vector, rc)) {
457 * If this is Rx and there are 1 to 4 packets and bytes are less
458 * than 9000, assume insufficient data to use the bulk rate limiting
459 * approach unless Tx is already in bulk rate limiting. We are
460 * likely latency driven.
462 if (packets && packets < 4 && bytes < 9000 &&
463 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
464 itr = IAVF_ITR_ADAPTIVE_LATENCY;
467 } else if (packets < 4) {
468 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
469 * bulk mode and we are receiving 4 or fewer packets just
470 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
471 * that the Rx can relax.
473 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
474 (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
475 IAVF_ITR_ADAPTIVE_MAX_USECS)
477 } else if (packets > 32) {
478 /* If we have processed over 32 packets in a single interrupt
479 * for Tx assume we need to switch over to "bulk" mode.
481 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
484 /* We have no packets to actually measure against. This means
485 * either one of the other queues on this vector is active or
486 * we are a Tx queue doing TSO with too high of an interrupt rate.
488 * Between 4 and 56 we can assume that our current interrupt delay
489 * is only slightly too low. As such we should increase it by a small
493 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
494 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
495 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
496 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
501 if (packets <= 256) {
502 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
503 itr &= IAVF_ITR_MASK;
505 /* Between 56 and 112 is our "goldilocks" zone where we are
506 * working out "just right". Just report that our current
507 * ITR is good for us.
512 /* If packet count is above 112 we are likely looking
513 * at a slight overrun of the delay we want. Try halving
514 * our delay to see if that will cut the number of packets
515 * in half per interrupt.
518 itr &= IAVF_ITR_MASK;
519 if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
520 itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
525 /* The paths below assume we are dealing with a bulk ITR since
526 * number of packets is greater than 256. We are just going to have
527 * to compute a value and try to bring the count under control,
528 * though for smaller packet sizes there isn't much we can do as
529 * NAPI polling will likely be kicking in sooner rather than later.
531 itr = IAVF_ITR_ADAPTIVE_BULK;
534 /* If packet counts are 256 or greater we can assume we have a gross
535 * overestimation of what the rate should be. Instead of trying to fine
536 * tune it just use the formula below to try and dial in an exact value
537 * given the current packet size of the frame.
539 avg_wire_size = bytes / packets;
541 /* The following is a crude approximation of:
542 * wmem_default / (size + overhead) = desired_pkts_per_int
543 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
544 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
546 * Assuming wmem_default is 212992 and overhead is 640 bytes per
547 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
550 * (170 * (size + 24)) / (size + 640) = ITR
552 * We first do some math on the packet size and then finally bitshift
553 * by 8 after rounding up. We also have to account for PCIe link speed
554 * difference as ITR scales based on this.
556 if (avg_wire_size <= 60) {
557 /* Start at 250k ints/sec */
558 avg_wire_size = 4096;
559 } else if (avg_wire_size <= 380) {
560 /* 250K ints/sec to 60K ints/sec */
562 avg_wire_size += 1696;
563 } else if (avg_wire_size <= 1084) {
564 /* 60K ints/sec to 36K ints/sec */
566 avg_wire_size += 11452;
567 } else if (avg_wire_size <= 1980) {
568 /* 36K ints/sec to 30K ints/sec */
570 avg_wire_size += 22420;
572 /* plateau at a limit of 30K ints/sec */
573 avg_wire_size = 32256;
576 /* If we are in low latency mode halve our delay which doubles the
577 * rate to somewhere between 100K to 16K ints/sec
579 if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
582 /* Resultant value is 256 times larger than it needs to be. This
583 * gives us room to adjust the value as needed to either increase
584 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
586 * Use addition as we have already recorded the new latency flag
589 itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
590 IAVF_ITR_ADAPTIVE_MIN_INC;
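/* Worked example (illustrative, assuming IAVF_ITR_ADAPTIVE_MIN_INC is 2
 * as in this driver family): bulk traffic that hit the 32256 plateau
 * above on a 40Gb link gets a divisor of 2 * 1024 = 2048 from
 * iavf_itr_divisor(), so DIV_ROUND_UP(32256, 2048) = 16 and the line
 * above adds 16 * 2 = 32 to itr.  An interval of roughly 32 usecs is the
 * ~30K interrupts/sec limit the plateau comment promises.
 */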
592 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
593 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
594 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
598 /* write back value */
599 rc->target_itr = itr;
601 /* next update should occur within next jiffy */
602 rc->next_update = next_update + 1;
605 rc->total_packets = 0;
609 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
610 * @tx_ring: the tx ring to set up
612 * Return 0 on success, negative on error
614 int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
616 struct device *dev = tx_ring->dev;
622 /* warn if we are about to overwrite the pointer */
623 WARN_ON(tx_ring->tx_bi);
624 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
625 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
629 /* round up to nearest 4K */
630 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
631 tx_ring->size = ALIGN(tx_ring->size, 4096);
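/* Illustrative sizing example: each struct iavf_tx_desc is a 16 byte
 * descriptor, so a 512 entry ring needs 8192 bytes, which the ALIGN()
 * above leaves untouched; an odd count such as 520 (8320 bytes) would be
 * rounded up to 12288 so the DMA region always ends on a 4K boundary.
 */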
632 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
633 &tx_ring->dma, GFP_KERNEL);
634 if (!tx_ring->desc) {
635 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
640 tx_ring->next_to_use = 0;
641 tx_ring->next_to_clean = 0;
642 tx_ring->tx_stats.prev_pkt_ctr = -1;
646 kfree(tx_ring->tx_bi);
647 tx_ring->tx_bi = NULL;
652 * iavf_clean_rx_ring - Free Rx buffers
653 * @rx_ring: ring to be cleaned
655 void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
657 unsigned long bi_size;
660 /* ring already cleared, nothing to do */
665 dev_kfree_skb(rx_ring->skb);
669 /* Free all the Rx ring sk_buffs */
670 for (i = 0; i < rx_ring->count; i++) {
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
676 /* Invalidate cache lines that may have been written to by
677 * device so that we avoid corrupting memory.
679 dma_sync_single_range_for_cpu(rx_ring->dev,
685 /* free resources associated with mapping */
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
687 iavf_rx_pg_size(rx_ring),
691 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
694 rx_bi->page_offset = 0;
697 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
698 memset(rx_ring->rx_bi, 0, bi_size);
700 /* Zero out the descriptor ring */
701 memset(rx_ring->desc, 0, rx_ring->size);
703 rx_ring->next_to_alloc = 0;
704 rx_ring->next_to_clean = 0;
705 rx_ring->next_to_use = 0;
709 * iavf_free_rx_resources - Free Rx resources
710 * @rx_ring: ring to clean the resources from
712 * Free all receive software resources
714 void iavf_free_rx_resources(struct iavf_ring *rx_ring)
716 iavf_clean_rx_ring(rx_ring);
717 kfree(rx_ring->rx_bi);
718 rx_ring->rx_bi = NULL;
721 dma_free_coherent(rx_ring->dev, rx_ring->size,
722 rx_ring->desc, rx_ring->dma);
723 rx_ring->desc = NULL;
728 * iavf_setup_rx_descriptors - Allocate Rx descriptors
729 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
731 * Returns 0 on success, negative on failure
733 int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
735 struct device *dev = rx_ring->dev;
738 /* warn if we are about to overwrite the pointer */
739 WARN_ON(rx_ring->rx_bi);
740 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
741 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
745 u64_stats_init(&rx_ring->syncp);
747 /* Round up to nearest 4K */
748 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
749 rx_ring->size = ALIGN(rx_ring->size, 4096);
750 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
751 &rx_ring->dma, GFP_KERNEL);
753 if (!rx_ring->desc) {
754 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
759 rx_ring->next_to_alloc = 0;
760 rx_ring->next_to_clean = 0;
761 rx_ring->next_to_use = 0;
765 kfree(rx_ring->rx_bi);
766 rx_ring->rx_bi = NULL;
771 * iavf_release_rx_desc - Store the new tail and head values
772 * @rx_ring: ring to bump
773 * @val: new head index
775 static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
777 rx_ring->next_to_use = val;
779 /* update next to alloc since we have filled the ring */
780 rx_ring->next_to_alloc = val;
782 /* Force memory writes to complete before letting h/w
783 * know there are new descriptors to fetch. (Only
784 * applicable for weak-ordered memory model archs,
788 writel(val, rx_ring->tail);
792 * iavf_rx_offset - Return expected offset into page to access data
793 * @rx_ring: Ring we are requesting offset of
795 * Returns the offset value for ring into the data buffer.
797 static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
799 return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
803 * iavf_alloc_mapped_page - recycle or make a new page
804 * @rx_ring: ring to use
805 * @bi: rx_buffer struct to modify
807 * Returns true if the page was successfully allocated or
810 static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
811 struct iavf_rx_buffer *bi)
813 struct page *page = bi->page;
816 /* since we are recycling buffers we should seldom need to alloc */
818 rx_ring->rx_stats.page_reuse_count++;
822 /* alloc new page for storage */
823 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
824 if (unlikely(!page)) {
825 rx_ring->rx_stats.alloc_page_failed++;
829 /* map page for use */
830 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
831 iavf_rx_pg_size(rx_ring),
835 /* if mapping failed free memory back to system since
836 * there isn't much point in holding memory we can't use
838 if (dma_mapping_error(rx_ring->dev, dma)) {
839 __free_pages(page, iavf_rx_pg_order(rx_ring));
840 rx_ring->rx_stats.alloc_page_failed++;
846 bi->page_offset = iavf_rx_offset(rx_ring);
848 /* initialize pagecnt_bias to 1 representing we fully own page */
849 bi->pagecnt_bias = 1;
855 * iavf_receive_skb - Send a completed packet up the stack
856 * @rx_ring: rx ring in play
857 * @skb: packet to send up
858 * @vlan_tag: vlan tag for packet
860 static void iavf_receive_skb(struct iavf_ring *rx_ring,
861 struct sk_buff *skb, u16 vlan_tag)
863 struct iavf_q_vector *q_vector = rx_ring->q_vector;
865 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
866 (vlan_tag & VLAN_VID_MASK))
867 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
869 napi_gro_receive(&q_vector->napi, skb);
873 * iavf_alloc_rx_buffers - Replace used receive buffers
874 * @rx_ring: ring to place buffers on
875 * @cleaned_count: number of buffers to replace
877 * Returns false if all allocations were successful, true if any fail
879 bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
881 u16 ntu = rx_ring->next_to_use;
882 union iavf_rx_desc *rx_desc;
883 struct iavf_rx_buffer *bi;
885 /* do nothing if no valid netdev defined */
886 if (!rx_ring->netdev || !cleaned_count)
889 rx_desc = IAVF_RX_DESC(rx_ring, ntu);
890 bi = &rx_ring->rx_bi[ntu];
893 if (!iavf_alloc_mapped_page(rx_ring, bi))
896 /* sync the buffer for use by the device */
897 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
902 /* Refresh the desc even if buffer_addrs didn't change
903 * because each write-back erases this info.
905 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
910 if (unlikely(ntu == rx_ring->count)) {
911 rx_desc = IAVF_RX_DESC(rx_ring, 0);
916 /* clear the status bits for the next_to_use descriptor */
917 rx_desc->wb.qword1.status_error_len = 0;
920 } while (cleaned_count);
922 if (rx_ring->next_to_use != ntu)
923 iavf_release_rx_desc(rx_ring, ntu);
928 if (rx_ring->next_to_use != ntu)
929 iavf_release_rx_desc(rx_ring, ntu);
931 /* make sure to come back via polling to try again after
938 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
939 * @vsi: the VSI we care about
940 * @skb: skb currently being received and modified
941 * @rx_desc: the receive descriptor
943 static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
945 union iavf_rx_desc *rx_desc)
947 struct iavf_rx_ptype_decoded decoded;
948 u32 rx_error, rx_status;
953 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
954 ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
955 rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
956 IAVF_RXD_QW1_ERROR_SHIFT;
957 rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
958 IAVF_RXD_QW1_STATUS_SHIFT;
959 decoded = decode_rx_desc_ptype(ptype);
961 skb->ip_summed = CHECKSUM_NONE;
963 skb_checksum_none_assert(skb);
965 /* Rx csum enabled and ip headers found? */
966 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
969 /* did the hardware decode the packet and checksum? */
970 if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
973 /* both known and outer_ip must be set for the below code to work */
974 if (!(decoded.known && decoded.outer_ip))
977 ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
978 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
979 ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
980 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
983 (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
984 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
987 /* likely incorrect csum if alternate IP extension headers found */
989 rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
990 /* don't increment checksum err here, non-fatal err */
993 /* there was some L4 error, count error and punt packet to the stack */
994 if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
997 /* handle packets that were not able to be checksummed due
998 * to arrival speed, in this case the stack can compute
1001 if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
1004 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1005 switch (decoded.inner_prot) {
1006 case IAVF_RX_PTYPE_INNER_PROT_TCP:
1007 case IAVF_RX_PTYPE_INNER_PROT_UDP:
1008 case IAVF_RX_PTYPE_INNER_PROT_SCTP:
1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
1018 vsi->back->hw_csum_rx_error++;
1022 * iavf_ptype_to_htype - get a hash type
1023 * @ptype: the ptype value from the descriptor
1025 * Returns a hash type to be used by skb_set_hash
1027 static inline int iavf_ptype_to_htype(u8 ptype)
1029 struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1032 return PKT_HASH_TYPE_NONE;
1034 if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1035 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1036 return PKT_HASH_TYPE_L4;
1037 else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1038 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1039 return PKT_HASH_TYPE_L3;
1041 return PKT_HASH_TYPE_L2;
1045 * iavf_rx_hash - set the hash value in the skb
1046 * @ring: descriptor ring
1047 * @rx_desc: specific descriptor
1048 * @skb: skb currently being received and modified
1049 * @rx_ptype: Rx packet type
1051 static inline void iavf_rx_hash(struct iavf_ring *ring,
1052 union iavf_rx_desc *rx_desc,
1053 struct sk_buff *skb,
1057 const __le64 rss_mask =
1058 cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
1059 IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
1061 if (ring->netdev->features & NETIF_F_RXHASH)
1064 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1065 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1066 skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
1071 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
1072 * @rx_ring: rx descriptor ring packet is being transacted on
1073 * @rx_desc: pointer to the EOP Rx descriptor
1074 * @skb: pointer to current skb being populated
1075 * @rx_ptype: the packet type decoded by hardware
1077 * This function checks the ring, descriptor, and packet information in
1078 * order to populate the hash, checksum, VLAN, protocol, and
1079 * other fields within the skb.
1082 void iavf_process_skb_fields(struct iavf_ring *rx_ring,
1083 union iavf_rx_desc *rx_desc, struct sk_buff *skb,
1086 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1088 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
1090 skb_record_rx_queue(skb, rx_ring->queue_index);
1092 /* modifies the skb - consumes the enet header */
1093 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1097 * iavf_cleanup_headers - Correct empty headers
1098 * @rx_ring: rx descriptor ring packet is being transacted on
1099 * @skb: pointer to current skb being fixed
1101 * Also address the case where we are pulling data in on pages only
1102 * and as such no data is present in the skb header.
1104 * In addition if skb is not at least 60 bytes we need to pad it so that
1105 * it is large enough to qualify as a valid Ethernet frame.
1107 * Returns true if an error was encountered and skb was freed.
1109 static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
1111 /* if eth_skb_pad returns an error the skb was freed */
1112 if (eth_skb_pad(skb))
1119 * iavf_reuse_rx_page - page flip buffer and store it back on the ring
1120 * @rx_ring: rx descriptor ring to store buffers on
1121 * @old_buff: donor buffer to have page reused
1123 * Synchronizes page for reuse by the adapter
1125 static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
1126 struct iavf_rx_buffer *old_buff)
1128 struct iavf_rx_buffer *new_buff;
1129 u16 nta = rx_ring->next_to_alloc;
1131 new_buff = &rx_ring->rx_bi[nta];
1133 /* update, and store next to alloc */
1135 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1137 /* transfer page from old buffer to new buffer */
1138 new_buff->dma = old_buff->dma;
1139 new_buff->page = old_buff->page;
1140 new_buff->page_offset = old_buff->page_offset;
1141 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1145 * iavf_page_is_reusable - check if any reuse is possible
1146 * @page: page struct to check
1148 * A page is not reusable if it was allocated under low memory
1149 * conditions, or it's not in the same NUMA node as this CPU.
1151 static inline bool iavf_page_is_reusable(struct page *page)
1153 return (page_to_nid(page) == numa_mem_id()) &&
1154 !page_is_pfmemalloc(page);
1158 * iavf_can_reuse_rx_page - Determine if this page can be reused by
1159 * the adapter for another receive
1161 * @rx_buffer: buffer containing the page
1163 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1164 * an unused region in the page.
1166 * For small pages, @truesize will be a constant value, half the size
1167 * of the memory at page. We'll attempt to alternate between high and
1168 * low halves of the page, with one half ready for use by the hardware
1169 * and the other half being consumed by the stack. We use the page
1170 * ref count to determine whether the stack has finished consuming the
1171 * portion of this page that was passed up with a previous packet. If
1172 * the page ref count is >1, we'll assume the "other" half page is
1173 * still busy, and this page cannot be reused.
1175 * For larger pages, @truesize will be the actual space used by the
1176 * received packet (adjusted upward to an even multiple of the cache
1177 * line size). This will advance through the page by the amount
1178 * actually consumed by the received packets while there is still
1179 * space for a buffer. Each region of larger pages will be used at
1180 * most once, after which the page will not be reused.
1182 * In either case, if the page is reusable its refcount is increased.
1184 static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
1186 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1187 struct page *page = rx_buffer->page;
1189 /* Is any reuse possible? */
1190 if (unlikely(!iavf_page_is_reusable(page)))
1193 #if (PAGE_SIZE < 8192)
1194 /* if we are only owner of page we can reuse it */
1195 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1198 #define IAVF_LAST_OFFSET \
1199 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
1200 if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
1204 /* If we have drained the page fragment pool we need to update
1205 * the pagecnt_bias and page count so that we fully restock the
1206 * number of references the driver holds.
1208 if (unlikely(!pagecnt_bias)) {
1209 page_ref_add(page, USHRT_MAX);
1210 rx_buffer->pagecnt_bias = USHRT_MAX;
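/* Illustrative accounting example: a freshly mapped page starts with
 * page_count == 1 and pagecnt_bias == 1, so the reuse test above sees
 * (page_count - pagecnt_bias) == 0 and treats the driver as sole owner.
 * Each buffer handed up the stack decrements the bias instead of the
 * atomic page refcount; only when the bias is exhausted do we bulk-add
 * USHRT_MAX references in a single page_ref_add() call, which is far
 * cheaper than one atomic operation per received frame.
 */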
1217 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
1218 * @rx_ring: rx descriptor ring to transact packets on
1219 * @rx_buffer: buffer containing page to add
1220 * @skb: sk_buff to place the data into
1221 * @size: packet length from rx_desc
1223 * This function will add the data contained in rx_buffer->page to the skb.
1224 * It will just attach the page as a frag to the skb.
1226 * The function will then update the page offset.
1228 static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
1229 struct iavf_rx_buffer *rx_buffer,
1230 struct sk_buff *skb,
1233 #if (PAGE_SIZE < 8192)
1234 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1236 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
1242 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1243 rx_buffer->page_offset, size, truesize);
1245 /* page is being used so we must update the page offset */
1246 #if (PAGE_SIZE < 8192)
1247 rx_buffer->page_offset ^= truesize;
1249 rx_buffer->page_offset += truesize;
1254 * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1255 * @rx_ring: rx descriptor ring to transact packets on
1256 * @size: size of buffer to add to skb
1258 * This function will pull an Rx buffer from the ring and synchronize it
1259 * for use by the CPU.
1261 static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
1262 const unsigned int size)
1264 struct iavf_rx_buffer *rx_buffer;
1269 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1270 prefetchw(rx_buffer->page);
1272 /* we are reusing so sync this buffer for CPU use */
1273 dma_sync_single_range_for_cpu(rx_ring->dev,
1275 rx_buffer->page_offset,
1279 /* We have pulled a buffer for use, so decrement pagecnt_bias */
1280 rx_buffer->pagecnt_bias--;
1286 * iavf_construct_skb - Allocate skb and populate it
1287 * @rx_ring: rx descriptor ring to transact packets on
1288 * @rx_buffer: rx buffer to pull data from
1289 * @size: size of buffer to add to skb
1291 * This function allocates an skb. It then populates it with the page
1292 * data from the current receive descriptor, taking care to set up the
1295 static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
1296 struct iavf_rx_buffer *rx_buffer,
1299 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1300 #if (PAGE_SIZE < 8192)
1301 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1303 unsigned int truesize = SKB_DATA_ALIGN(size);
1305 unsigned int headlen;
1306 struct sk_buff *skb;
1310 /* prefetch first cache line of first page */
1312 #if L1_CACHE_BYTES < 128
1313 prefetch(va + L1_CACHE_BYTES);
1316 /* allocate a skb to store the frags */
1317 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1319 GFP_ATOMIC | __GFP_NOWARN);
1323 /* Determine available headroom for copy */
1325 if (headlen > IAVF_RX_HDR_SIZE)
1326 headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
1328 /* align pull length to size of long to optimize memcpy performance */
1329 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1331 /* update all of the pointers */
1334 skb_add_rx_frag(skb, 0, rx_buffer->page,
1335 rx_buffer->page_offset + headlen,
1338 /* buffer is used by skb, update page_offset */
1339 #if (PAGE_SIZE < 8192)
1340 rx_buffer->page_offset ^= truesize;
1342 rx_buffer->page_offset += truesize;
1345 /* buffer is unused, reset bias back to rx_buffer */
1346 rx_buffer->pagecnt_bias++;
1353 * iavf_build_skb - Build skb around an existing buffer
1354 * @rx_ring: Rx descriptor ring to transact packets on
1355 * @rx_buffer: Rx buffer to pull data from
1356 * @size: size of buffer to add to skb
1358 * This function builds an skb around an existing Rx buffer, taking care
1359 * to set up the skb correctly and avoid any memcpy overhead.
1361 static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1362 struct iavf_rx_buffer *rx_buffer,
1365 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1366 #if (PAGE_SIZE < 8192)
1367 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1369 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1370 SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
1372 struct sk_buff *skb;
1376 /* prefetch first cache line of first page */
1378 #if L1_CACHE_BYTES < 128
1379 prefetch(va + L1_CACHE_BYTES);
1381 /* build an skb around the page buffer */
1382 skb = build_skb(va - IAVF_SKB_PAD, truesize);
1386 /* update pointers within the skb to store the data */
1387 skb_reserve(skb, IAVF_SKB_PAD);
1388 __skb_put(skb, size);
1390 /* buffer is used by skb, update page_offset */
1391 #if (PAGE_SIZE < 8192)
1392 rx_buffer->page_offset ^= truesize;
1394 rx_buffer->page_offset += truesize;
1401 * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
1402 * @rx_ring: rx descriptor ring to transact packets on
1403 * @rx_buffer: rx buffer to pull data from
1405 * This function will clean up the contents of the rx_buffer. It will
1406 * either recycle the buffer or unmap it and free the associated resources.
1408 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
1409 struct iavf_rx_buffer *rx_buffer)
1414 if (iavf_can_reuse_rx_page(rx_buffer)) {
1415 /* hand second half of page back to the ring */
1416 iavf_reuse_rx_page(rx_ring, rx_buffer);
1417 rx_ring->rx_stats.page_reuse_count++;
1419 /* we are not reusing the buffer so unmap it */
1420 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1421 iavf_rx_pg_size(rx_ring),
1422 DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
1423 __page_frag_cache_drain(rx_buffer->page,
1424 rx_buffer->pagecnt_bias);
1427 /* clear contents of buffer_info */
1428 rx_buffer->page = NULL;
1432 * iavf_is_non_eop - process handling of non-EOP buffers
1433 * @rx_ring: Rx ring being processed
1434 * @rx_desc: Rx descriptor for current buffer
1435 * @skb: Current socket buffer containing buffer in progress
1437 * This function updates next to clean. If the buffer is an EOP buffer
1438 * this function exits returning false, otherwise it will place the
1439 * sk_buff in the next buffer to be chained and return true indicating
1440 * that this is in fact a non-EOP buffer.
1442 static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
1443 union iavf_rx_desc *rx_desc,
1444 struct sk_buff *skb)
1446 u32 ntc = rx_ring->next_to_clean + 1;
1448 /* fetch, update, and store next to clean */
1449 ntc = (ntc < rx_ring->count) ? ntc : 0;
1450 rx_ring->next_to_clean = ntc;
1452 prefetch(IAVF_RX_DESC(rx_ring, ntc));
1454 /* if we are the last buffer then there is nothing else to do */
1455 #define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
1456 if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
1459 rx_ring->rx_stats.non_eop_descs++;
1465 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1466 * @rx_ring: rx descriptor ring to transact packets on
1467 * @budget: Total limit on number of packets to process
1469 * This function provides a "bounce buffer" approach to Rx interrupt
1470 * processing. The advantage to this is that on systems that have
1471 * expensive overhead for IOMMU access this provides a means of avoiding
1472 * it by maintaining the mapping of the page to the system.
1474 * Returns amount of work completed
1476 static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1478 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1479 struct sk_buff *skb = rx_ring->skb;
1480 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1481 bool failure = false;
1483 while (likely(total_rx_packets < (unsigned int)budget)) {
1484 struct iavf_rx_buffer *rx_buffer;
1485 union iavf_rx_desc *rx_desc;
1491 /* return some buffers to hardware, one at a time is too slow */
1492 if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1493 failure = failure ||
1494 iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1498 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1500 /* status_error_len will always be zero for unused descriptors
1501 * because it's cleared in cleanup, and overlaps with hdr_addr
1502 * which is always zero because packet split isn't used. If the
1503 * hardware wrote DD then the length will be non-zero
1505 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1507 /* This memory barrier is needed to keep us from reading
1508 * any other fields out of the rx_desc until we have
1509 * verified the descriptor has been written back.
1512 #define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
1513 if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
1516 size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1517 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1519 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
1520 rx_buffer = iavf_get_rx_buffer(rx_ring, size);
1522 /* retrieve a buffer from the ring */
1524 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1525 else if (ring_uses_build_skb(rx_ring))
1526 skb = iavf_build_skb(rx_ring, rx_buffer, size);
1528 skb = iavf_construct_skb(rx_ring, rx_buffer, size);
1530 /* exit if we failed to retrieve a buffer */
1532 rx_ring->rx_stats.alloc_buff_failed++;
1534 rx_buffer->pagecnt_bias++;
1538 iavf_put_rx_buffer(rx_ring, rx_buffer);
1541 if (iavf_is_non_eop(rx_ring, rx_desc, skb))
1544 /* ERR_MASK will only have valid bits if EOP set, and
1545 * what we are doing here is actually checking
1546 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1549 if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
1550 dev_kfree_skb_any(skb);
1555 if (iavf_cleanup_headers(rx_ring, skb)) {
1560 /* probably a little skewed due to removing CRC */
1561 total_rx_bytes += skb->len;
1563 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1564 rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
1565 IAVF_RXD_QW1_PTYPE_SHIFT;
1567 /* populate checksum, VLAN, and protocol */
1568 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1571 vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1572 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1574 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
1575 iavf_receive_skb(rx_ring, skb, vlan_tag);
1578 /* update budget accounting */
1584 u64_stats_update_begin(&rx_ring->syncp);
1585 rx_ring->stats.packets += total_rx_packets;
1586 rx_ring->stats.bytes += total_rx_bytes;
1587 u64_stats_update_end(&rx_ring->syncp);
1588 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1589 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1591 /* guarantee a trip back through this routine if there was a failure */
1592 return failure ? budget : (int)total_rx_packets;
1595 static inline u32 iavf_buildreg_itr(const int type, u16 itr)
1599 /* We don't bother with setting the CLEARPBA bit as the data sheet
1600 * points out doing so is "meaningless since it was already
1601 * auto-cleared". The auto-clearing happens when the interrupt is
1604 * Hardware errata 28 also indicates that writing to a
1605 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
1606 * an event in the PBA anyway so we need to rely on the automask
1607 * to hold pending events for us until the interrupt is re-enabled
1609 * The itr value is reported in microseconds, and the register
1610 * value is recorded in 2 microsecond units. For this reason we
1611 * only need to shift by the interval shift - 1 instead of the
1614 itr &= IAVF_ITR_MASK;
1616 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1617 (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1618 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
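/* Worked example (illustrative): with the interval field starting at bit
 * 5 of this register, as on related Intel parts, an itr argument of 50
 * usecs shifted left by (5 - 1) places the value 25 in the interval
 * field, matching the 2 usec register granularity described above, while
 * the type argument selects which per-vector ITR index the write
 * updates.
 */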
1623 /* a small macro to shorten up some long lines */
1624 #define INTREG IAVF_VFINT_DYN_CTLN1
1626 /* The act of updating the ITR will cause it to immediately trigger. In order
1627 * to prevent this from throwing off adaptive update statistics we defer the
1628 * update so that it can only happen so often. So after either Tx or Rx are
1629 * updated we make the adaptive scheme wait until either the ITR completely
1630 * expires via the next_update expiration or we have been through at least
1633 #define ITR_COUNTDOWN_START 3
1636 * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
1637 * @vsi: the VSI we care about
1638 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1641 static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
1642 struct iavf_q_vector *q_vector)
1644 struct iavf_hw *hw = &vsi->back->hw;
1647 /* These will do nothing if dynamic updates are not enabled */
1648 iavf_update_itr(q_vector, &q_vector->tx);
1649 iavf_update_itr(q_vector, &q_vector->rx);
1651 /* This block of logic allows us to get away with only updating
1652 * one ITR value with each interrupt. The idea is to perform a
1653 * pseudo-lazy update with the following criteria.
1655 * 1. Rx is given higher priority than Tx if both are in same state
1656 * 2. If we must reduce an ITR, that reduction is given highest priority.
1657 * 3. We then give priority to increasing ITR based on amount.
1659 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1660 /* Rx ITR needs to be reduced, this is highest priority */
1661 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1662 q_vector->rx.target_itr);
1663 q_vector->rx.current_itr = q_vector->rx.target_itr;
1664 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1665 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1666 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1667 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1668 /* Tx ITR needs to be reduced, this is second priority
1669 * Tx ITR needs to be increased more than Rx, fourth priority
1671 intval = iavf_buildreg_itr(IAVF_TX_ITR,
1672 q_vector->tx.target_itr);
1673 q_vector->tx.current_itr = q_vector->tx.target_itr;
1674 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1675 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1676 /* Rx ITR needs to be increased, third priority */
1677 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1678 q_vector->rx.target_itr);
1679 q_vector->rx.current_itr = q_vector->rx.target_itr;
1680 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1682 /* No ITR update, lowest priority */
1683 intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1684 if (q_vector->itr_countdown)
1685 q_vector->itr_countdown--;
1688 if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1689 wr32(hw, INTREG(q_vector->reg_idx), intval);
1693 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
1694 * @napi: napi struct with our devices info in it
1695 * @budget: amount of work driver is allowed to do this pass, in packets
1697 * This function will clean all queues associated with a q_vector.
1699 * Returns the amount of work done
1701 int iavf_napi_poll(struct napi_struct *napi, int budget)
1703 struct iavf_q_vector *q_vector =
1704 container_of(napi, struct iavf_q_vector, napi);
1705 struct iavf_vsi *vsi = q_vector->vsi;
1706 struct iavf_ring *ring;
1707 bool clean_complete = true;
1708 bool arm_wb = false;
1709 int budget_per_ring;
1712 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1713 napi_complete(napi);
1717 /* Since the actual Tx work is minimal, we can give the Tx a larger
1718 * budget and be more aggressive about cleaning up the Tx descriptors.
1720 iavf_for_each_ring(ring, q_vector->tx) {
1721 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1722 clean_complete = false;
1725 arm_wb |= ring->arm_wb;
1726 ring->arm_wb = false;
1729 /* Handle case where we are called by netpoll with a budget of 0 */
1733 /* We attempt to distribute budget to each Rx queue fairly, but don't
1734 * allow the budget to go below 1 because that would exit polling early.
1736 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1738 iavf_for_each_ring(ring, q_vector->rx) {
1739 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1741 work_done += cleaned;
1742 /* if we clean as many as budgeted, we must not be done */
1743 if (cleaned >= budget_per_ring)
1744 clean_complete = false;
1747 /* If work not completed, return budget and polling will return */
1748 if (!clean_complete) {
1749 int cpu_id = smp_processor_id();
1751 /* It is possible that the interrupt affinity has changed but,
1752 * if the cpu is pegged at 100%, polling will never exit while
1753 * traffic continues and the interrupt will be stuck on this
1754 * cpu. We check to make sure affinity is correct before we
1755 * continue to poll, otherwise we must stop polling so the
1756 * interrupt can move to the correct cpu.
1758 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1759 /* Tell napi that we are done polling */
1760 napi_complete_done(napi, work_done);
1762 /* Force an interrupt */
1763 iavf_force_wb(vsi, q_vector);
1765 /* Return budget-1 so that polling stops */
1770 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1771 iavf_enable_wb_on_itr(vsi, q_vector);
1776 if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1777 q_vector->arm_wb_state = false;
1779 /* Exit the polling mode, but don't re-enable interrupts if stack might
1780 * poll us due to busy-polling
1782 if (likely(napi_complete_done(napi, work_done)))
1783 iavf_update_enable_itr(vsi, q_vector);
1785 return min(work_done, budget - 1);
1789 * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1791 * @tx_ring: ring to send buffer on
1792 * @flags: the tx flags to be set
1794 * Checks the skb and set up correspondingly several generic transmit flags
1795 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1797 * Returns an error code to indicate the frame should be dropped upon error,
1798 * otherwise returns 0 to indicate the flags have been set properly.
1800 static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1801 struct iavf_ring *tx_ring,
1804 __be16 protocol = skb->protocol;
1807 if (protocol == htons(ETH_P_8021Q) &&
1808 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1809 /* When HW VLAN acceleration is turned off by the user the
1810 * stack sets the protocol to 8021q so that the driver
1811 * can take any steps required to support the SW only
1812 * VLAN handling. In our case the driver doesn't need
1813 * to take any further steps so just set the protocol
1814 * to the encapsulated ethertype.
1816 skb->protocol = vlan_get_protocol(skb);
1820 /* if we have a HW VLAN tag being added, default to the HW one */
1821 if (skb_vlan_tag_present(skb)) {
1822 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1823 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1824 /* else if it is a SW VLAN, check the next protocol and store the tag */
1825 } else if (protocol == htons(ETH_P_8021Q)) {
1826 struct vlan_hdr *vhdr, _vhdr;
1828 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1832 protocol = vhdr->h_vlan_encapsulated_proto;
1833 tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
1834 tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
1843 * iavf_tso - set up the tso context descriptor
1844 * @first: pointer to first Tx buffer for xmit
1845 * @hdr_len: ptr to the size of the packet header
1846 * @cd_type_cmd_tso_mss: Quad Word 1
1848 * Returns 0 if no TSO can happen, 1 if tso is going, or error
1850 static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1851 u64 *cd_type_cmd_tso_mss)
1853 struct sk_buff *skb = first->skb;
1854 u64 cd_cmd, cd_tso_len, cd_mss;
1865 u32 paylen, l4_offset;
1866 u16 gso_segs, gso_size;
1869 if (skb->ip_summed != CHECKSUM_PARTIAL)
1872 if (!skb_is_gso(skb))
1875 err = skb_cow_head(skb, 0);
1879 ip.hdr = skb_network_header(skb);
1880 l4.hdr = skb_transport_header(skb);
1882 /* initialize outer IP header fields */
1883 if (ip.v4->version == 4) {
1887 ip.v6->payload_len = 0;
1890 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1894 SKB_GSO_UDP_TUNNEL |
1895 SKB_GSO_UDP_TUNNEL_CSUM)) {
1896 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1897 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1900 /* determine offset of outer transport header */
1901 l4_offset = l4.hdr - skb->data;
1903 /* remove payload length from outer checksum */
1904 paylen = skb->len - l4_offset;
1905 csum_replace_by_diff(&l4.udp->check,
1906 (__force __wsum)htonl(paylen));
1909 /* reset pointers to inner headers */
1910 ip.hdr = skb_inner_network_header(skb);
1911 l4.hdr = skb_inner_transport_header(skb);
1913 /* initialize inner IP header fields */
1914 if (ip.v4->version == 4) {
1918 ip.v6->payload_len = 0;
1922 /* determine offset of inner transport header */
1923 l4_offset = l4.hdr - skb->data;
1925 /* remove payload length from inner checksum */
1926 paylen = skb->len - l4_offset;
1927 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1929 /* compute length of segmentation header */
1930 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1932 /* pull values out of skb_shinfo */
1933 gso_size = skb_shinfo(skb)->gso_size;
1934 gso_segs = skb_shinfo(skb)->gso_segs;
1936 /* update GSO size and bytecount with header size */
1937 first->gso_segs = gso_segs;
1938 first->bytecount += (first->gso_segs - 1) * *hdr_len;
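/* Worked example (illustrative): for a 7254 byte TCP skb with a 54 byte
 * Ethernet + IPv4 + TCP header and gso_size 1200, the stack reports
 * gso_segs = 6, so the line above adds (6 - 1) * 54 = 270 bytes to
 * bytecount.  The Tx byte statistics then include the header the
 * hardware replicates in front of every segment it cuts, not just the
 * single header present in the original skb.
 */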
1940 /* find the field values */
1941 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1942 cd_tso_len = skb->len - *hdr_len;
1944 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1945 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1946 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1951 * iavf_tx_enable_csum - Enable Tx checksum offloads
1953 * @tx_flags: pointer to Tx flags currently set
1954 * @td_cmd: Tx descriptor command bits to set
1955 * @td_offset: Tx descriptor header offsets to set
1956 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct iavf_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;

	if (skb->encapsulation) {
		u32 tunnel = 0;

		/* define outer network header type */
		if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
				  IAVF_TX_CTX_EXT_IP_IPV4 :
				  IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
			tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (*tx_flags & IAVF_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  IAVF_TXD_CTX_QW0_NATLEN_SHIFT;

		/* indicate if we need to offload outer UDP header */
		if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;

		/* record tunnel offload values */
		*cd_tunneling |= tunnel;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		*tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= IAVF_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= IAVF_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
		       IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       IAVF_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
		cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & IAVF_TX_FLAGS_TSO)
			return -1;

		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}
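
/* Worked example for the offset value built above: the hardware counts
 * MACLEN in 2-byte words and IPLEN/L4LEN in 4-byte words (hence the /2 and
 * /4).  For a plain Ethernet + IPv4 + TCP frame with no options this works
 * out to MACLEN = 14 / 2 = 7, IPLEN = 20 / 4 = 5 and L4LEN = tcp->doff = 5,
 * each shifted into place by its IAVF_TX_DESC_LENGTH_* shift.
 */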

/**
 * iavf_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct iavf_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = IAVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
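
/* Note on the early return above: a context descriptor is only consumed
 * when the caller actually has something to put in it - TSO parameters in
 * cd_type_cmd_tso_mss, tunneling parameters in cd_tunneling, or an outer
 * L2 tag in cd_l2tag2.  A plain non-TSO, non-tunneled frame therefore
 * costs no extra descriptor here.
 */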

/**
 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __iavf_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > IAVF_MAX_DATA_PER_TXD) {
			int align_pad = -(stale->page_offset) &
					(IAVF_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > IAVF_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
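
/* Worked example for the check above (illustrative numbers): with
 * gso_size = 7000 and seven 1000-byte fragments, every window of six
 * consecutive fragments sums to 6000 < 7000, so sum goes negative and the
 * function returns true (the skb must be linearized).  With 1200-byte
 * fragments the same window sums to 7200 >= 7000 and the skb is sent
 * as-is.
 */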

/**
 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
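
/* The stop-then-recheck sequence above is the usual lost-wakeup guard:
 * the queue is stopped before the free-descriptor count is re-read, so a
 * concurrent completion (iavf_clean_tx_irq()) either observes the stopped
 * queue and restarts it, or this path observes the descriptors it just
 * freed and restarts the queue itself.
 */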

/**
 * iavf_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
			       struct iavf_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct iavf_tx_buffer *tx_bi;
	struct iavf_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;

	if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
			 IAVF_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = IAVF_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);
			tx_desc++;
			i++;
			if (i == tx_ring->count) {
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
				i = 0;
			}
			dma += max_data;
			size -= max_data;
			max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);
		tx_desc++;
		i++;
		if (i == tx_ring->count) {
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		tx_bi = &tx_ring->tx_bi[i];
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with RS and EOP bits */
	td_cmd |= IAVF_TXD_CMD;
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
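
/* Sketch of how the while loop above splits an oversized buffer
 * (illustrative values, assuming the usual IAVF_MAX_DATA_PER_TXD_ALIGNED
 * of 12K): a 28K linear head mapped at a 4K-aligned address is emitted as
 * 12K + 12K + 4K data descriptors, with each split kept aligned to
 * IAVF_MAX_READ_REQ_SIZE so no single descriptor exceeds the hardware's
 * per-buffer limit.
 */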

/**
 * iavf_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
					struct iavf_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct iavf_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	iavf_trace(xmit_frame_ring, skb, tx_ring);

	count = iavf_xmit_descriptor_count(skb);
	if (iavf_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = iavf_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
	 * + 4 desc gap to avoid the cache line where head is,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= IAVF_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= IAVF_TX_FLAGS_IPV6;

	tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= IAVF_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	/* always enable CRC insertion offload */
	td_cmd |= IAVF_TX_DESC_CMD_ICRC;

	iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
	return NETDEV_TX_OK;
}
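
/* Both iavf_tso() and iavf_tx_enable_csum() follow the same convention in
 * the flow above: a negative return drops the frame via out_drop, zero
 * means there was nothing to offload, and a positive return means the
 * offload fields were filled in.
 */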

/**
 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
		if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = IAVF_MIN_TX_LEN;
		skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
	}

	return iavf_xmit_frame_ring(skb, tx_ring);
}