iavf: allow null RX descriptors
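
What the patch addresses (an editor's summary, not part of the commit message): the hardware can write back a "null" RX descriptor, one with the DD (descriptor done) bit set but a zero packet-buffer length and no data attached. The receive path therefore has to key completion off the DD bit rather than the length, and every helper downstream has to tolerate a zero size or a NULL buffer. A rough sketch of the writeback decode, using the field names visible in the hunks below (`rx_desc->wb.qword1.status_error_len` is assumed from the upstream descriptor layout):

	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	/* null descriptor: done is true, size is 0, nothing to unmap */
	bool done = !!(qword & BIT(IAVF_RX_DESC_STATUS_DD_SHIFT));
	unsigned int size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
			    IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;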
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 6d43cbe29c4955636ea4b9efa3e8368013973a9a..1cde1601bc329ec66937ddfefa3c751346c65d6f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -190,7 +190,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
 static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
                              struct iavf_ring *tx_ring, int napi_budget)
 {
-       u16 i = tx_ring->next_to_clean;
+       int i = tx_ring->next_to_clean;
        struct iavf_tx_buffer *tx_buf;
        struct iavf_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
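
A note on the u16 to int change (hedged: the loop body sits below this hunk and is not shown here): iavf_clean_tx_irq uses the negative-index idiom inherited from i40e, where the ring index is biased by -tx_ring->count so that the wrap test is a plain `!i`. A signed int lets `i` actually go negative during the loop instead of relying on unsigned wraparound. The shape, roughly:

	i -= tx_ring->count;		/* bias the index into -count..-1 */
	do {
		/* ... unmap tx_buf, tally total_bytes/total_packets ... */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {	/* walked off the end of the ring */
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
		}
	} while (likely(--budget));
	i += tx_ring->count;		/* undo the bias */
	tx_ring->next_to_clean = i;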
@@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
        unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
 #endif
 
+       if (!size)
+               return;
+
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                        rx_buffer->page_offset, size, truesize);
 
@@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 {
        struct iavf_rx_buffer *rx_buffer;
 
+       if (!size)
+               return NULL;
+
        rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
        prefetchw(rx_buffer->page);
 
@@ -1299,6 +1305,8 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
        unsigned int headlen;
        struct sk_buff *skb;
 
+       if (!rx_buffer)
+               return NULL;
        /* prefetch first cache line of first page */
        prefetch(va);
 #if L1_CACHE_BYTES < 128
@@ -1363,6 +1371,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 #endif
        struct sk_buff *skb;
 
+       if (!rx_buffer)
+               return NULL;
        /* prefetch first cache line of first page */
        prefetch(va);
 #if L1_CACHE_BYTES < 128
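
One caveat with both skb helpers (an inference, since the declarations sit just above these hunks): `va` is initialized as `page_address(rx_buffer->page) + rx_buffer->page_offset` among the declarations, so a NULL rx_buffer is still read before the new check runs. A safer ordering would compute the address only after the check, along these lines:

	struct sk_buff *skb;
	void *va;

	if (!rx_buffer)
		return NULL;

	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	/* prefetch first cache line of first page */
	prefetch(va);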
@@ -1398,6 +1408,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
                               struct iavf_rx_buffer *rx_buffer)
 {
+       if (!rx_buffer)
+               return;
+
        if (iavf_can_reuse_rx_page(rx_buffer)) {
                /* hand second half of page back to the ring */
                iavf_reuse_rx_page(rx_ring, rx_buffer);
@@ -1496,11 +1509,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
                 * verified the descriptor has been written back.
                 */
                dma_rmb();
+#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
+               if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+                       break;
 
                size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
                       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
-               if (!size)
-                       break;
 
                iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
                rx_buffer = iavf_get_rx_buffer(rx_ring, size);
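
The loop previously treated a zero length as "no descriptor ready" and bailed out, so a null descriptor (DD set, length zero) would stall cleaning at that ring entry. Testing the DD bit instead lets zero-length descriptors continue into the now NULL-tolerant helpers. For reference, iavf_test_staterr() in the i40e-derived sources is approximately:

	static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
					     const u64 stat_err_bits)
	{
		return !!(rx_desc->wb.qword1.status_error_len &
			  cpu_to_le64(stat_err_bits));
	}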
@@ -1516,7 +1530,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
                        rx_ring->rx_stats.alloc_buff_failed++;
-                       rx_buffer->pagecnt_bias++;
+                       if (rx_buffer)
+                               rx_buffer->pagecnt_bias++;
                        break;
                }
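
Taken together, one pass of the loop now tolerates a missing buffer end to end. A simplified sketch of the per-descriptor flow (assuming the surrounding upstream structure, including the `ring_uses_build_skb()` helper):

	rx_buffer = iavf_get_rx_buffer(rx_ring, size);	/* NULL when size == 0 */

	if (skb)
		iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); /* no-op if !size */
	else if (ring_uses_build_skb(rx_ring))
		skb = iavf_build_skb(rx_ring, rx_buffer, size);	/* NULL-safe */
	else
		skb = iavf_construct_skb(rx_ring, rx_buffer, size);

	/* ... on failure, bump alloc_buff_failed as above ... */

	iavf_put_rx_buffer(rx_ring, rx_buffer);		/* returns early on NULL */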