// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>

#include "igbvf.h"

#define DRV_VERSION "2.4.0-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
		  "Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
		  "Copyright (c) 2009 - 2012 Intel Corporation.";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.mac		= e1000_vfadapt,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac		= e1000_vfadapt_i350,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]	= &igbvf_vf_info,
	[board_i350_vf]	= &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 * @ring: address of receive ring structure
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

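/* Worked example of the arithmetic above, for a hypothetical 8-entry
 * ring: with next_to_clean = 2 and next_to_use = 6 the free region
 * wraps, so unused = 8 + 2 - 6 - 1 = 3. One slot is always kept empty
 * so that next_to_use == next_to_clean unambiguously means "empty"
 * rather than "full".
 */
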
/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u32 status, u16 vlan)
{
	u16 vid;

	if (status & E1000_RXD_STAT_VP) {
		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
		    (status & E1000_RXDEXT_STATERR_LB))
			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		else
			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	napi_gro_receive(&adapter->rx_ring->napi, skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
					 u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}

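/* Illustration of the status walk above (bit names per the e1000
 * defines, traffic hypothetical): a good TCP frame arrives with TCPCS
 * set and neither TCPE nor IPE, so ip_summed becomes
 * CHECKSUM_UNNECESSARY and the stack skips software verification; a
 * frame with TCPE set keeps CHECKSUM_NONE and the stack detects the
 * corruption itself.
 */
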
/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
				   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev,
					      buffer_info->page_dma)) {
				__free_page(buffer_info->page);
				buffer_info->page = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				dev_kfree_skb(buffer_info->skb);
				buffer_info->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info.
		 */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

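/* Sketch of the half-page recycling done above, assuming a 4096-byte
 * PAGE_SIZE: page_offset starts at 0 (bytes 0..2047 mapped) and each
 * reuse flips it with page_offset ^= PAGE_SIZE / 2 (bytes 2048..4095),
 * so one page backs two receive buffers; the get_page() taken in
 * igbvf_clean_rx_irq() keeps the page alive while the other half is
 * still posted to hardware.
 */
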
/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: output parameter used to indicate completed work
 * @work_to_do: input parameter setting limit of work allowed
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
		       & E1000_RXDADV_HDRBUFLEN_MASK) >>
		       E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_ps_hdr_size,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}

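/* Sketch of a multi-descriptor receive in the loop above, sizes
 * hypothetical: a 3000-byte frame under header split
 * (rx_ps_hdr_size = 128) delivers hlen header bytes into the skb and
 * up to half a page of payload per descriptor as page fragments.
 * Descriptors without E1000_RXD_STAT_EOP pass the partially built skb
 * on through the next buffer_info; only the EOP descriptor pushes the
 * completed skb up via igbvf_receive_skb().
 */
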
static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
			    struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring being initialized
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring being initialized
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring structure pointer to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 DMA_FROM_DEVICE);
			else
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
					       buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
					   enum latency_range itr_setting,
					   int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}

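/* Two example classifications under the thresholds above, for a ring
 * currently in low_latency (numbers hypothetical): an interval with
 * 20000 bytes in 8 packets (2500 bytes/packet, fewer than 10 packets)
 * drops to bulk_latency, while 20000 bytes in 40 packets (small
 * packets, more than 35 of them) promotes to lowest_latency.
 */
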
static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
					 adapter->tx_ring->itr_val,
					 adapter->total_tx_packets,
					 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
					 adapter->total_rx_packets,
					 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;

		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}

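/* The damping above in numbers: when the computed value is larger than
 * the current one it is approached in steps, each pass applying
 * new = min(current + (target >> 2), target), i.e. at most a quarter
 * of the target above the current value per interrupt; a smaller
 * computed value takes effect immediately.
 */
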
/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring structure to clean descriptors from
 *
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	buffer_info = &tx_ring->buffer_info[i];
	eop_desc = buffer_info->next_to_watch;

	do {
		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		buffer_info->next_to_watch = NULL;

		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
		}

		eop_desc = buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

	if (unlikely(count && netif_carrier_ok(netdev) &&
	    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return count < tx_ring->count;
}

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically re-enable the interrupt when we write
	 * EICS
	 */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE	(-1)

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
				int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	 * Each queue has a single entry in the table to which we write
	 * a vector number along with a "valid" bit.  Sadly, the layout
	 * of the table is somewhat counterintuitive.
	 */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
		array_ew32(IVAR0, index, ivar);
	}
}

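/* IVAR0 entry layout implied by the shifts above, one register per
 * pair of queues:
 *
 *	bits  7:0   Rx queue (even)	bits 15:8   Tx queue (even)
 *	bits 23:16  Rx queue (odd)	bits 31:24  Tx queue (odd)
 *
 * Example: assigning MSI-X vector 1 to Rx queue 0 clears bits 7:0 and
 * writes (1 | E1000_IVAR_VALID) there, leaving the other three entries
 * untouched.
 */
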
/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */
	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = GENMASK(vector - 1, 0);
	adapter->eims_other = BIT(vector - 1);
	e1e_flush();
}

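/* With the three vectors assigned above (Tx = 0, Rx = 1,
 * mailbox/other = 2), vector ends at 3, so GENMASK(2, 0) = 0x7 enables
 * all three causes in EIMS, and BIT(2) = 0x4 is kept as eims_other for
 * the link/mailbox vector alone.
 */
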
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix_range(adapter->pdev,
					    adapter->msix_entries, 3, 3);
	}

	if (err < 0) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
			"Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
		"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring,
						  napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	if (work_done == budget)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;

	spin_lock_bh(&hw->mbx_lock);

	e1000_rlpml_set_vf(hw, max_frame_size);

	spin_unlock_bh(&hw->mbx_lock);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	spin_lock_bh(&hw->mbx_lock);

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		spin_unlock_bh(&hw->mbx_lock);
		return -EINVAL;
	}

	spin_unlock_bh(&hw->mbx_lock);

	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	spin_lock_bh(&hw->mbx_lock);

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
			"Failed to remove vlan id %d\n", vid);
		spin_unlock_bh(&hw->mbx_lock);
		return -EINVAL;
	}

	spin_unlock_bh(&hw->mbx_lock);

	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
		    E1000_SRRCTL_BSIZEHDR_MASK |
		    E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}

/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
					 GFP_ATOMIC);
		if (!mta_list)
			return;
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	spin_lock_bh(&hw->mbx_lock);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);

	spin_unlock_bh(&hw->mbx_lock);
	kfree(mta_list);
}

/**
 * igbvf_set_uni - Configure unicast MAC filters
 * @netdev: network interface device structure
 *
 * This routine is responsible for configuring the hardware for proper
 * unicast filters.
 **/
static int igbvf_set_uni(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	spin_lock_bh(&hw->mbx_lock);

	/* Clear all unicast MAC filters */
	hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL);

	spin_unlock_bh(&hw->mbx_lock);

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		/* Add MAC filters one by one */
		netdev_for_each_uc_addr(ha, netdev) {
			spin_lock_bh(&hw->mbx_lock);

			hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD,
						ha->addr);

			spin_unlock_bh(&hw->mbx_lock);
			udelay(200);
		}
	}

	return 0;
}

static void igbvf_set_rx_mode(struct net_device *netdev)
{
	igbvf_set_multi(netdev);
	igbvf_set_uni(netdev);
}

/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_rx_mode(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}

/* igbvf_reset - bring the hardware into a known good state
 * @adapter: private board structure
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime. After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	spin_lock_bh(&hw->mbx_lock);

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	spin_unlock_bh(&hw->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}

int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* record the stats before reset */
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	igbvf_down(adapter);
	igbvf_up(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);
}

/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->hw.mbx_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}

static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}

/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&hw->mbx_lock);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	spin_unlock_bh(&hw->mbx_lock);

	if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

#define UPDATE_VF_COUNTER(reg, name) \
{ \
	u32 current_counter = er32(reg); \
	if (current_counter < adapter->stats.last_##name) \
		adapter->stats.name += 0x100000000LL; \
	adapter->stats.last_##name = current_counter; \
	adapter->stats.name &= 0xFFFFFFFF00000000LL; \
	adapter->stats.name |= current_counter; \
}

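/* Worked example of the rollover handling above: if last_gprc is
 * 0xFFFFFFF0 and the register now reads 0x00000010, the new reading is
 * smaller, so 2^32 is added to the accumulated count before the low
 * 32 bits are replaced with the fresh value, extending the
 * free-running 32-bit hardware counter to 64 bits in software.
 */
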
/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->netdev->stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
		 adapter->link_speed,
		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	spin_lock_bh(&hw->mbx_lock);

	ret_val = hw->mac.ops.check_for_link(hw);

	spin_unlock_bh(&hw->mbx_lock);

	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}

/**
 * igbvf_watchdog - Timer Call-back
 * @t: timer list pointer containing our watchdog timer
 **/
static void igbvf_watchdog(struct timer_list *t)
{
	struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
						     struct igbvf_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}

#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16

static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
			      u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u16 i = tx_ring->next_to_use;

	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	buffer_info = &tx_ring->buffer_info[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	buffer_info->time_stamp = jiffies;
	buffer_info->dma = 0;
}

static int igbvf_tso(struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;

	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

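/* Example of the context-descriptor packing performed above, with
 * hypothetical values: a TCP segment with a 20-byte TCP header and an
 * MSS of 1448 yields mss_l4len_idx carrying
 * 20 << E1000_ADVTXD_L4LEN_SHIFT and 1448 << E1000_ADVTXD_MSS_SHIFT,
 * which lets the hardware replicate the headers and slice the payload
 * without re-parsing the frame.
 */
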
static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
			  u32 tx_flags, __be16 protocol)
{
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(tx_flags & IGBVF_TX_FLAGS_VLAN))
			return false;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((protocol == htons(ETH_P_IPV6)) &&
		     igbvf_ipv6_csum_is_sctp(skb))) {
			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;

	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
	return true;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough free descriptors we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

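/* The stop-then-recheck sequence above closes a race with
 * igbvf_clean_tx_irq(): the transmit path stops the queue, the memory
 * barrier publishes the stopped state, and only then is the free count
 * re-read. If the cleanup path freed descriptors in that window, the
 * queue is woken here instead of staying stopped indefinitely.
 */
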
#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1u << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}

static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
				      struct igbvf_ring *tx_ring,
				      int tx_flags, int count,
				      unsigned int first, u32 paylen,
				      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
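/**
 * igbvf_xmit_frame_ring_adv - set up offloads and queue one skb on a ring
 * @skb: packet to transmit
 * @netdev: network interface device structure
 * @tx_ring: ring to place the packet on
 *
 * On DMA mapping failure the ring is rewound to its prior state and the
 * skb is dropped; only a full ring returns NETDEV_TX_BUSY to the stack.
 */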
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	__be16 protocol = vlan_get_protocol(skb);

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: count + 4 descriptors:
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for the context descriptor,
	 * otherwise try again next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/* count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   first, skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
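/* ndo_start_xmit entry point; the VF exposes a single Tx queue, so all
 * traffic is funneled through tx_ring[0].
 */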
static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
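/* Worker scheduled from igbvf_tx_timeout(); performs the reinit outside
 * interrupt context.
 */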
static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;

	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}
/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */
	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}
static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	/* no VF-specific ioctls are supported */
	return -EOPNOTSUPP;
}
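/* Power management: quiesce the interface, save PCI state when built
 * with CONFIG_PM, and disable the device. Also reused by
 * igbvf_shutdown() below.
 */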
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}
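/* Power management: re-enable the device, reclaim the IRQs if the
 * interface was running, and reset the VF before attaching it again.
 */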
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
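/* Log the VF flavor (I350 or 82576) and the MAC address at probe time */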
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
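/* ndo_set_features: the only toggle the VF honors here is Rx checksum
 * offload, tracked via IGBVF_FLAG_RX_CSUM_DISABLED.
 */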
static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}
#define IGBVF_MAX_MAC_HDR_LEN		127
#define IGBVF_MAX_NETWORK_HDR_LEN	511
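/* The limits above come from the advanced context descriptor layout:
 * MACLEN is a 7-bit field (max 127) and IPLEN a 9-bit field (max 511),
 * so longer headers cannot be described to the hardware.
 */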
static netdev_features_t
igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
		     netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_set_rx_mode	= igbvf_set_rx_mode,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_do_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
	.ndo_features_check	= igbvf_features_check,
};
/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_get_variants;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_CSUM |
			      NETIF_F_SCTP_CRC;

#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				    NETIF_F_GSO_GRE_CSUM | \
				    NETIF_F_GSO_IPXIP4 | \
				    NETIF_F_GSO_IPXIP6 | \
				    NETIF_F_GSO_UDP_TUNNEL | \
				    NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IGBVF_GSO_PARTIAL_FEATURES;

	netdev->features = netdev->hw_features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	spin_lock_bh(&hw->mbx_lock);

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err)
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address.\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	spin_unlock_bh(&hw->mbx_lock);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address.\n");
		eth_hw_addr_random(netdev);
		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
		       netdev->addr_len);
	}

	timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* set hardware-specific flags */
	if (adapter->hw.mac.type == e1000_vfadapt_i350)
		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
err_get_variants:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/* it is important to delete the NAPI struct prior to freeing the
	 * Rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static const struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= igbvf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= igbvf_suspend,
	.resume		= igbvf_resume,
#endif
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};
/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);
/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);