1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2008 Intel Corporation. */
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/prefetch.h>
9 char ixgb_driver_name[] = "ixgb";
10 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
12 #define DRIVERNAPI "-NAPI"
13 #define DRV_VERSION "1.0.135-k2" DRIVERNAPI
14 const char ixgb_driver_version[] = DRV_VERSION;
15 static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
17 #define IXGB_CB_LENGTH 256
18 static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
19 module_param(copybreak, uint, 0644);
20 MODULE_PARM_DESC(copybreak,
21 "Maximum size of packet that is copied to a new buffer on receive");
23 /* ixgb_pci_tbl - PCI Device ID Table
25 * Wildcard entries (PCI_ANY_ID) should come last
26 * Last entry must be all 0s
28 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
31 static const struct pci_device_id ixgb_pci_tbl[] = {
32 {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
33 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
34 {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
35 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
36 {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
37 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
38 {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
39 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
/* required last entry */
{0,}
};
45 MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
47 /* Local Function Prototypes */
48 static int ixgb_init_module(void);
49 static void ixgb_exit_module(void);
50 static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
51 static void ixgb_remove(struct pci_dev *pdev);
52 static int ixgb_sw_init(struct ixgb_adapter *adapter);
53 static int ixgb_open(struct net_device *netdev);
54 static int ixgb_close(struct net_device *netdev);
55 static void ixgb_configure_tx(struct ixgb_adapter *adapter);
56 static void ixgb_configure_rx(struct ixgb_adapter *adapter);
57 static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
58 static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
59 static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
60 static void ixgb_set_multi(struct net_device *netdev);
61 static void ixgb_watchdog(struct timer_list *t);
62 static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
63 struct net_device *netdev);
64 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
65 static int ixgb_set_mac(struct net_device *netdev, void *p);
66 static irqreturn_t ixgb_intr(int irq, void *data);
67 static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
69 static int ixgb_clean(struct napi_struct *, int);
70 static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
71 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
73 static void ixgb_tx_timeout(struct net_device *dev);
74 static void ixgb_tx_timeout_task(struct work_struct *work);
76 static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
77 static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
78 static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
79 __be16 proto, u16 vid);
80 static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
81 __be16 proto, u16 vid);
82 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
84 #ifdef CONFIG_NET_POLL_CONTROLLER
85 /* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif
89 static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
90 enum pci_channel_state state);
91 static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
92 static void ixgb_io_resume (struct pci_dev *pdev);
94 static const struct pci_error_handlers ixgb_err_handler = {
95 .error_detected = ixgb_io_error_detected,
96 .slot_reset = ixgb_io_slot_reset,
.resume = ixgb_io_resume,
};
100 static struct pci_driver ixgb_driver = {
101 .name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
.remove = ixgb_remove,
.err_handler = &ixgb_err_handler
};
108 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
109 MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
110 MODULE_LICENSE("GPL");
111 MODULE_VERSION(DRV_VERSION);
113 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
114 static int debug = -1;
115 module_param(debug, int, 0);
116 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
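/* debug is interpreted by netif_msg_init(): the default of -1 keeps
 * DEFAULT_MSG_ENABLE (driver, probe and link messages), 0 silences the
 * driver, and a value N between 1 and 16 enables the lowest N NETIF_MSG_*
 * bits, so e.g. debug=3 turns on NETIF_MSG_DRV, NETIF_MSG_PROBE and
 * NETIF_MSG_LINK. */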
119 * ixgb_init_module - Driver Registration Routine
121 * ixgb_init_module is the first routine called when the driver is
122 * loaded. All it does is register with the PCI subsystem.
126 ixgb_init_module(void)
128 pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
129 pr_info("%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
}
134 module_init(ixgb_init_module);
137 * ixgb_exit_module - Driver Exit Cleanup Routine
139 * ixgb_exit_module is called just before the driver is removed
144 ixgb_exit_module(void)
pci_unregister_driver(&ixgb_driver);
}
149 module_exit(ixgb_exit_module);
152 * ixgb_irq_disable - Mask off interrupt generation on the NIC
153 * @adapter: board private structure
157 ixgb_irq_disable(struct ixgb_adapter *adapter)
159 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
160 IXGB_WRITE_FLUSH(&adapter->hw);
161 synchronize_irq(adapter->pdev->irq);
165 * ixgb_irq_enable - Enable default interrupt generation settings
166 * @adapter: board private structure
170 ixgb_irq_enable(struct ixgb_adapter *adapter)
172 u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
173 IXGB_INT_TXDW | IXGB_INT_LSC;
174 if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
175 val |= IXGB_INT_GPI0;
176 IXGB_WRITE_REG(&adapter->hw, IMS, val);
177 IXGB_WRITE_FLUSH(&adapter->hw);
181 ixgb_up(struct ixgb_adapter *adapter)
183 struct net_device *netdev = adapter->netdev;
184 int err, irq_flags = IRQF_SHARED;
185 int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
186 struct ixgb_hw *hw = &adapter->hw;
188 /* hardware has been reset, we need to reload some things */
190 ixgb_rar_set(hw, netdev->dev_addr, 0);
191 ixgb_set_multi(netdev);
193 ixgb_restore_vlan(adapter);
195 ixgb_configure_tx(adapter);
196 ixgb_setup_rctl(adapter);
197 ixgb_configure_rx(adapter);
198 ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
200 /* disable interrupts and get the hardware into a known state */
201 IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
203 /* only enable MSI if bus is in PCI-X mode */
204 if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
205 err = pci_enable_msi(adapter->pdev);
207 adapter->have_msi = true;
210 /* proceed to try to request regular interrupt */
213 err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
214 netdev->name, netdev);
216 if (adapter->have_msi)
217 pci_disable_msi(adapter->pdev);
218 netif_err(adapter, probe, adapter->netdev,
219 "Unable to allocate interrupt Error: %d\n", err);
223 if ((hw->max_frame_size != max_frame) ||
224 (hw->max_frame_size !=
225 (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
227 hw->max_frame_size = max_frame;
229 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
231 if (hw->max_frame_size >
232 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
233 u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
235 if (!(ctrl0 & IXGB_CTRL0_JFE)) {
236 ctrl0 |= IXGB_CTRL0_JFE;
237 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
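/* Sketch of the sizing logic above (register layout per ixgb_hw.h, assumed
 * here): MFS carries the maximum frame size in its upper bits, hence the
 * IXGB_MFS_SHIFT, and CTRL0.JFE must additionally be set once the frame
 * exceeds the standard 1518-byte Ethernet maximum so the MAC will accept
 * jumbo frames. */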
242 clear_bit(__IXGB_DOWN, &adapter->flags);
244 napi_enable(&adapter->napi);
245 ixgb_irq_enable(adapter);
247 netif_wake_queue(netdev);
249 mod_timer(&adapter->watchdog_timer, jiffies);
255 ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
257 struct net_device *netdev = adapter->netdev;
259 /* prevent the interrupt handler from restarting watchdog */
260 set_bit(__IXGB_DOWN, &adapter->flags);
262 netif_carrier_off(netdev);
264 napi_disable(&adapter->napi);
265 /* waiting for NAPI to complete can re-enable interrupts */
266 ixgb_irq_disable(adapter);
267 free_irq(adapter->pdev->irq, netdev);
269 if (adapter->have_msi)
270 pci_disable_msi(adapter->pdev);
273 del_timer_sync(&adapter->watchdog_timer);
275 adapter->link_speed = 0;
276 adapter->link_duplex = 0;
277 netif_stop_queue(netdev);
280 ixgb_clean_tx_ring(adapter);
281 ixgb_clean_rx_ring(adapter);
285 ixgb_reset(struct ixgb_adapter *adapter)
287 struct ixgb_hw *hw = &adapter->hw;
289 ixgb_adapter_stop(hw);
290 if (!ixgb_init_hw(hw))
291 netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
293 /* restore frame size information */
294 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
295 if (hw->max_frame_size >
296 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
297 u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
298 if (!(ctrl0 & IXGB_CTRL0_JFE)) {
299 ctrl0 |= IXGB_CTRL0_JFE;
300 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
305 static netdev_features_t
306 ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
/* Tx VLAN insertion does not work per HW design when Rx stripping is
 * disabled.
 */
312 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
313 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
319 ixgb_set_features(struct net_device *netdev, netdev_features_t features)
321 struct ixgb_adapter *adapter = netdev_priv(netdev);
322 netdev_features_t changed = features ^ netdev->features;
324 if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
327 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
329 if (netif_running(netdev)) {
330 ixgb_down(adapter, true);
332 ixgb_set_speed_duplex(netdev);
340 static const struct net_device_ops ixgb_netdev_ops = {
341 .ndo_open = ixgb_open,
342 .ndo_stop = ixgb_close,
343 .ndo_start_xmit = ixgb_xmit_frame,
344 .ndo_set_rx_mode = ixgb_set_multi,
345 .ndo_validate_addr = eth_validate_addr,
346 .ndo_set_mac_address = ixgb_set_mac,
347 .ndo_change_mtu = ixgb_change_mtu,
348 .ndo_tx_timeout = ixgb_tx_timeout,
349 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
350 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
351 #ifdef CONFIG_NET_POLL_CONTROLLER
352 .ndo_poll_controller = ixgb_netpoll,
354 .ndo_fix_features = ixgb_fix_features,
355 .ndo_set_features = ixgb_set_features,
359 * ixgb_probe - Device Initialization Routine
360 * @pdev: PCI device information struct
361 * @ent: entry in ixgb_pci_tbl
363 * Returns 0 on success, negative on failure
365 * ixgb_probe initializes an adapter identified by a pci_dev structure.
366 * The OS initialization, configuring of the adapter private structure,
367 * and a hardware reset occur.
371 ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
373 struct net_device *netdev = NULL;
374 struct ixgb_adapter *adapter;
375 static int cards_found = 0;
380 err = pci_enable_device(pdev);
385 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
389 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
391 pr_err("No usable DMA configuration, aborting\n");
396 err = pci_request_regions(pdev, ixgb_driver_name);
398 goto err_request_regions;
400 pci_set_master(pdev);
402 netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
405 goto err_alloc_etherdev;
408 SET_NETDEV_DEV(netdev, &pdev->dev);
410 pci_set_drvdata(pdev, netdev);
411 adapter = netdev_priv(netdev);
412 adapter->netdev = netdev;
413 adapter->pdev = pdev;
414 adapter->hw.back = adapter;
415 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
417 adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
418 if (!adapter->hw.hw_addr) {
423 for (i = BAR_1; i <= BAR_5; i++) {
424 if (pci_resource_len(pdev, i) == 0)
426 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
427 adapter->hw.io_base = pci_resource_start(pdev, i);
432 netdev->netdev_ops = &ixgb_netdev_ops;
433 ixgb_set_ethtool_ops(netdev);
434 netdev->watchdog_timeo = 5 * HZ;
435 netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
437 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
439 adapter->bd_number = cards_found;
440 adapter->link_speed = 0;
441 adapter->link_duplex = 0;
443 /* setup the private structure */
445 err = ixgb_sw_init(adapter);
449 netdev->hw_features = NETIF_F_SG |
452 NETIF_F_HW_VLAN_CTAG_TX |
453 NETIF_F_HW_VLAN_CTAG_RX;
454 netdev->features = netdev->hw_features |
455 NETIF_F_HW_VLAN_CTAG_FILTER;
456 netdev->hw_features |= NETIF_F_RXCSUM;
459 netdev->features |= NETIF_F_HIGHDMA;
460 netdev->vlan_features |= NETIF_F_HIGHDMA;
463 /* MTU range: 68 - 16114 */
464 netdev->min_mtu = ETH_MIN_MTU;
465 netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
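/* For reference, the 16114 above is IXGB_MAX_JUMBO_FRAME_SIZE (0x3F00 =
 * 16128 in ixgb_hw.h, assumed here) minus the 14-byte Ethernet header. */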
467 /* make sure the EEPROM is good */
469 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
470 netif_err(adapter, probe, adapter->netdev,
471 "The EEPROM Checksum Is Not Valid\n");
476 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
478 if (!is_valid_ether_addr(netdev->dev_addr)) {
479 netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
484 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
486 timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);
488 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
490 strcpy(netdev->name, "eth%d");
491 err = register_netdev(netdev);
495 /* carrier off reporting is important to ethtool even BEFORE open */
496 netif_carrier_off(netdev);
498 netif_info(adapter, probe, adapter->netdev,
499 "Intel(R) PRO/10GbE Network Connection\n");
500 ixgb_check_options(adapter);
501 /* reset the hardware with the new settings */
511 iounmap(adapter->hw.hw_addr);
515 pci_release_regions(pdev);
518 pci_disable_device(pdev);
523 * ixgb_remove - Device Removal Routine
524 * @pdev: PCI device information struct
526 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
533 ixgb_remove(struct pci_dev *pdev)
535 struct net_device *netdev = pci_get_drvdata(pdev);
536 struct ixgb_adapter *adapter = netdev_priv(netdev);
538 cancel_work_sync(&adapter->tx_timeout_task);
540 unregister_netdev(netdev);
542 iounmap(adapter->hw.hw_addr);
543 pci_release_regions(pdev);
546 pci_disable_device(pdev);
550 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
551 * @adapter: board private structure to initialize
553 * ixgb_sw_init initializes the Adapter private data structure.
554 * Fields are initialized based on PCI device information and
555 * OS network device settings (MTU size).
559 ixgb_sw_init(struct ixgb_adapter *adapter)
561 struct ixgb_hw *hw = &adapter->hw;
562 struct net_device *netdev = adapter->netdev;
563 struct pci_dev *pdev = adapter->pdev;
565 /* PCI config space info */
567 hw->vendor_id = pdev->vendor;
568 hw->device_id = pdev->device;
569 hw->subsystem_vendor_id = pdev->subsystem_vendor;
570 hw->subsystem_id = pdev->subsystem_device;
572 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
573 adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
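/* e.g. the default 1500-byte MTU gives a 1518-byte max frame (14-byte
 * Ethernet header + 4-byte FCS); the extra 8 bytes of receive buffer are
 * slack for a hardware errata. */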
575 if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
576 (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
577 (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
578 (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
579 hw->mac_type = ixgb_82597;
581 /* should never have loaded on this device */
582 netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
585 /* enable flow control to be programmed */
588 set_bit(__IXGB_DOWN, &adapter->flags);
593 * ixgb_open - Called when a network interface is made active
594 * @netdev: network interface device structure
596 * Returns 0 on success, negative value on failure
598 * The open entry point is called when a network interface is made
599 * active by the system (IFF_UP). At this point all resources needed
600 * for transmit and receive operations are allocated, the interrupt
601 * handler is registered with the OS, the watchdog timer is started,
602 * and the stack is notified that the interface is ready.
606 ixgb_open(struct net_device *netdev)
608 struct ixgb_adapter *adapter = netdev_priv(netdev);
611 /* allocate transmit descriptors */
612 err = ixgb_setup_tx_resources(adapter);
616 netif_carrier_off(netdev);
618 /* allocate receive descriptors */
620 err = ixgb_setup_rx_resources(adapter);
624 err = ixgb_up(adapter);
628 netif_start_queue(netdev);
633 ixgb_free_rx_resources(adapter);
635 ixgb_free_tx_resources(adapter);
643 * ixgb_close - Disables a network interface
644 * @netdev: network interface device structure
646 * Returns 0, this is not allowed to fail
648 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
650 * needs to be disabled. A global MAC reset is issued to stop the
651 * hardware, and all transmit and receive resources are freed.
655 ixgb_close(struct net_device *netdev)
657 struct ixgb_adapter *adapter = netdev_priv(netdev);
659 ixgb_down(adapter, true);
661 ixgb_free_tx_resources(adapter);
662 ixgb_free_rx_resources(adapter);
668 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
669 * @adapter: board private structure
671 * Return 0 on success, negative on failure
675 ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
677 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
678 struct pci_dev *pdev = adapter->pdev;
681 size = sizeof(struct ixgb_buffer) * txdr->count;
682 txdr->buffer_info = vzalloc(size);
683 if (!txdr->buffer_info)
686 /* round up to nearest 4K */
688 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
689 txdr->size = ALIGN(txdr->size, 4096);
691 txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
694 vfree(txdr->buffer_info);
698 txdr->next_to_use = 0;
699 txdr->next_to_clean = 0;
705 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
706 * @adapter: board private structure
708 * Configure the Tx unit of the MAC after a reset.
712 ixgb_configure_tx(struct ixgb_adapter *adapter)
714 u64 tdba = adapter->tx_ring.dma;
715 u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
717 struct ixgb_hw *hw = &adapter->hw;
719 /* Setup the Base and Length of the Tx Descriptor Ring
720 * tx_ring.dma can be either a 32 or 64 bit value
723 IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
724 IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
726 IXGB_WRITE_REG(hw, TDLEN, tdlen);
728 /* Setup the HW Tx Head and Tail descriptor pointers */
730 IXGB_WRITE_REG(hw, TDH, 0);
731 IXGB_WRITE_REG(hw, TDT, 0);
733 /* don't set up txdctl, it induces performance problems if configured
735 /* Set the Tx Interrupt Delay register */
737 IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
739 /* Program the Transmit Control Register */
741 tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
742 IXGB_WRITE_REG(hw, TCTL, tctl);
744 /* Setup Transmit Descriptor Settings for this adapter */
adapter->tx_cmd_type = IXGB_TX_DESC_TYPE |
    (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
751 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
752 * @adapter: board private structure
754 * Returns 0 on success, negative on failure
758 ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
760 struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
761 struct pci_dev *pdev = adapter->pdev;
764 size = sizeof(struct ixgb_buffer) * rxdr->count;
765 rxdr->buffer_info = vzalloc(size);
766 if (!rxdr->buffer_info)
769 /* Round up to nearest 4K */
771 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
772 rxdr->size = ALIGN(rxdr->size, 4096);
774 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
778 vfree(rxdr->buffer_info);
781 memset(rxdr->desc, 0, rxdr->size);
783 rxdr->next_to_clean = 0;
784 rxdr->next_to_use = 0;
790 * ixgb_setup_rctl - configure the receive control register
791 * @adapter: Board private structure
795 ixgb_setup_rctl(struct ixgb_adapter *adapter)
799 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
801 rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
804 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
805 IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
806 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
808 rctl |= IXGB_RCTL_SECRC;
810 if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
811 rctl |= IXGB_RCTL_BSIZE_2048;
812 else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
813 rctl |= IXGB_RCTL_BSIZE_4096;
814 else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
815 rctl |= IXGB_RCTL_BSIZE_8192;
816 else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
817 rctl |= IXGB_RCTL_BSIZE_16384;
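/* rx_buffer_len is max_frame_size plus the 8-byte errata slack set in
 * ixgb_sw_init(), so the chain above picks the smallest hardware buffer
 * bucket (2K/4K/8K/16K) that still holds a complete frame. */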
819 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
823 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
824 * @adapter: board private structure
826 * Configure the Rx unit of the MAC after a reset.
830 ixgb_configure_rx(struct ixgb_adapter *adapter)
832 u64 rdba = adapter->rx_ring.dma;
833 u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
834 struct ixgb_hw *hw = &adapter->hw;
838 /* make sure receives are disabled while setting up the descriptors */
840 rctl = IXGB_READ_REG(hw, RCTL);
841 IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
843 /* set the Receive Delay Timer Register */
845 IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
847 /* Setup the Base and Length of the Rx Descriptor Ring */
849 IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
850 IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
852 IXGB_WRITE_REG(hw, RDLEN, rdlen);
854 /* Setup the HW Rx Head and Tail Descriptor Pointers */
855 IXGB_WRITE_REG(hw, RDH, 0);
856 IXGB_WRITE_REG(hw, RDT, 0);
858 /* due to the hardware errata with RXDCTL, we are unable to use any of
859 * the performance enhancing features of it without causing other
860 * subtle bugs, some of the bugs could include receive length
861 * corruption at high data rates (WTHRESH > 0) and/or receive
 * descriptor ring irregularities (particularly in hardware cache) */
863 IXGB_WRITE_REG(hw, RXDCTL, 0);
865 /* Enable Receive Checksum Offload for TCP and UDP */
866 if (adapter->rx_csum) {
867 rxcsum = IXGB_READ_REG(hw, RXCSUM);
868 rxcsum |= IXGB_RXCSUM_TUOFL;
869 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
872 /* Enable Receives */
874 IXGB_WRITE_REG(hw, RCTL, rctl);
878 * ixgb_free_tx_resources - Free Tx Resources
879 * @adapter: board private structure
881 * Free all transmit software resources
885 ixgb_free_tx_resources(struct ixgb_adapter *adapter)
887 struct pci_dev *pdev = adapter->pdev;
889 ixgb_clean_tx_ring(adapter);
891 vfree(adapter->tx_ring.buffer_info);
892 adapter->tx_ring.buffer_info = NULL;
894 dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
895 adapter->tx_ring.desc, adapter->tx_ring.dma);
897 adapter->tx_ring.desc = NULL;
901 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
902 struct ixgb_buffer *buffer_info)
904 if (buffer_info->dma) {
905 if (buffer_info->mapped_as_page)
906 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
907 buffer_info->length, DMA_TO_DEVICE);
909 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
910 buffer_info->length, DMA_TO_DEVICE);
911 buffer_info->dma = 0;
914 if (buffer_info->skb) {
915 dev_kfree_skb_any(buffer_info->skb);
916 buffer_info->skb = NULL;
918 buffer_info->time_stamp = 0;
919 /* these fields must always be initialized in tx
920 * buffer_info->length = 0;
921 * buffer_info->next_to_watch = 0; */
925 * ixgb_clean_tx_ring - Free Tx Buffers
926 * @adapter: board private structure
930 ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
932 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
933 struct ixgb_buffer *buffer_info;
937 /* Free all the Tx ring sk_buffs */
939 for (i = 0; i < tx_ring->count; i++) {
940 buffer_info = &tx_ring->buffer_info[i];
941 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
944 size = sizeof(struct ixgb_buffer) * tx_ring->count;
945 memset(tx_ring->buffer_info, 0, size);
947 /* Zero out the descriptor ring */
949 memset(tx_ring->desc, 0, tx_ring->size);
951 tx_ring->next_to_use = 0;
952 tx_ring->next_to_clean = 0;
954 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
955 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
959 * ixgb_free_rx_resources - Free Rx Resources
960 * @adapter: board private structure
962 * Free all receive software resources
966 ixgb_free_rx_resources(struct ixgb_adapter *adapter)
968 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
969 struct pci_dev *pdev = adapter->pdev;
971 ixgb_clean_rx_ring(adapter);
973 vfree(rx_ring->buffer_info);
974 rx_ring->buffer_info = NULL;
976 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
979 rx_ring->desc = NULL;
983 * ixgb_clean_rx_ring - Free Rx Buffers
984 * @adapter: board private structure
988 ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
990 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
991 struct ixgb_buffer *buffer_info;
992 struct pci_dev *pdev = adapter->pdev;
996 /* Free all the Rx ring sk_buffs */
998 for (i = 0; i < rx_ring->count; i++) {
999 buffer_info = &rx_ring->buffer_info[i];
1000 if (buffer_info->dma) {
1001 dma_unmap_single(&pdev->dev,
1003 buffer_info->length,
1005 buffer_info->dma = 0;
1006 buffer_info->length = 0;
1009 if (buffer_info->skb) {
1010 dev_kfree_skb(buffer_info->skb);
1011 buffer_info->skb = NULL;
1015 size = sizeof(struct ixgb_buffer) * rx_ring->count;
1016 memset(rx_ring->buffer_info, 0, size);
1018 /* Zero out the descriptor ring */
1020 memset(rx_ring->desc, 0, rx_ring->size);
1022 rx_ring->next_to_clean = 0;
1023 rx_ring->next_to_use = 0;
1025 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1026 IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1030 * ixgb_set_mac - Change the Ethernet Address of the NIC
1031 * @netdev: network interface device structure
1032 * @p: pointer to an address structure
1034 * Returns 0 on success, negative on failure
1038 ixgb_set_mac(struct net_device *netdev, void *p)
1040 struct ixgb_adapter *adapter = netdev_priv(netdev);
1041 struct sockaddr *addr = p;
1043 if (!is_valid_ether_addr(addr->sa_data))
1044 return -EADDRNOTAVAIL;
1046 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1048 ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1054 * ixgb_set_multi - Multicast and Promiscuous mode set
1055 * @netdev: network interface device structure
1057 * The set_multi entry point is called whenever the multicast address
1058 * list or the network interface flags are updated. This routine is
1059 * responsible for configuring the hardware for proper multicast,
1060 * promiscuous mode, and all-multi behavior.
1064 ixgb_set_multi(struct net_device *netdev)
1066 struct ixgb_adapter *adapter = netdev_priv(netdev);
1067 struct ixgb_hw *hw = &adapter->hw;
1068 struct netdev_hw_addr *ha;
1071 /* Check for Promiscuous and All Multicast modes */
1073 rctl = IXGB_READ_REG(hw, RCTL);
1075 if (netdev->flags & IFF_PROMISC) {
1076 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1077 /* disable VLAN filtering */
1078 rctl &= ~IXGB_RCTL_CFIEN;
1079 rctl &= ~IXGB_RCTL_VFE;
1081 if (netdev->flags & IFF_ALLMULTI) {
1082 rctl |= IXGB_RCTL_MPE;
1083 rctl &= ~IXGB_RCTL_UPE;
1085 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1087 /* enable VLAN filtering */
1088 rctl |= IXGB_RCTL_VFE;
1089 rctl &= ~IXGB_RCTL_CFIEN;
1092 if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1093 rctl |= IXGB_RCTL_MPE;
1094 IXGB_WRITE_REG(hw, RCTL, rctl);
1096 u8 *mta = kmalloc_array(ETH_ALEN,
1097 IXGB_MAX_NUM_MULTICAST_ADDRESSES,
1103 IXGB_WRITE_REG(hw, RCTL, rctl);
1106 netdev_for_each_mc_addr(ha, netdev) {
1107 memcpy(addr, ha->addr, ETH_ALEN);
1111 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1116 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1117 ixgb_vlan_strip_enable(adapter);
1119 ixgb_vlan_strip_disable(adapter);
1124 * ixgb_watchdog - Timer Call-back
 * @t: pointer to the watchdog timer_list embedded in the board private structure
1129 ixgb_watchdog(struct timer_list *t)
1131 struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
1132 struct net_device *netdev = adapter->netdev;
1133 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1135 ixgb_check_for_link(&adapter->hw);
1137 if (ixgb_check_for_bad_link(&adapter->hw)) {
1138 /* force the reset path */
1139 netif_stop_queue(netdev);
1142 if (adapter->hw.link_up) {
1143 if (!netif_carrier_ok(netdev)) {
1145 "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
1146 (adapter->hw.fc.type == ixgb_fc_full) ?
1148 (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
1150 (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
1152 adapter->link_speed = 10000;
1153 adapter->link_duplex = FULL_DUPLEX;
1154 netif_carrier_on(netdev);
1157 if (netif_carrier_ok(netdev)) {
1158 adapter->link_speed = 0;
1159 adapter->link_duplex = 0;
1160 netdev_info(netdev, "NIC Link is Down\n");
1161 netif_carrier_off(netdev);
1165 ixgb_update_stats(adapter);
1167 if (!netif_carrier_ok(netdev)) {
1168 if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1169 /* We've lost link, so the controller stops DMA,
1170 * but we've got queued Tx work that's never going
1171 * to get done, so reset controller to flush Tx.
1172 * (Do the reset outside of interrupt context). */
1173 schedule_work(&adapter->tx_timeout_task);
1174 /* return immediately since reset is imminent */
1179 /* Force detection of hung controller every watchdog period */
1180 adapter->detect_tx_hung = true;
1182 /* generate an interrupt to force clean up of any stragglers */
1183 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1185 /* Reset the timer */
1186 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1189 #define IXGB_TX_FLAGS_CSUM 0x00000001
1190 #define IXGB_TX_FLAGS_VLAN 0x00000002
1191 #define IXGB_TX_FLAGS_TSO 0x00000004
1194 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1196 struct ixgb_context_desc *context_desc;
1198 u8 ipcss, ipcso, tucss, tucso, hdr_len;
1199 u16 ipcse, tucse, mss;
1201 if (likely(skb_is_gso(skb))) {
1202 struct ixgb_buffer *buffer_info;
1206 err = skb_cow_head(skb, 0);
1210 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1211 mss = skb_shinfo(skb)->gso_size;
1215 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1218 ipcss = skb_network_offset(skb);
1219 ipcso = (void *)&(iph->check) - (void *)skb->data;
1220 ipcse = skb_transport_offset(skb) - 1;
1221 tucss = skb_transport_offset(skb);
1222 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1225 i = adapter->tx_ring.next_to_use;
1226 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1227 buffer_info = &adapter->tx_ring.buffer_info[i];
1228 WARN_ON(buffer_info->dma != 0);
1230 context_desc->ipcss = ipcss;
1231 context_desc->ipcso = ipcso;
1232 context_desc->ipcse = cpu_to_le16(ipcse);
1233 context_desc->tucss = tucss;
1234 context_desc->tucso = tucso;
1235 context_desc->tucse = cpu_to_le16(tucse);
1236 context_desc->mss = cpu_to_le16(mss);
1237 context_desc->hdr_len = hdr_len;
1238 context_desc->status = 0;
1239 context_desc->cmd_type_len = cpu_to_le32(
1240 IXGB_CONTEXT_DESC_TYPE
1241 | IXGB_CONTEXT_DESC_CMD_TSE
1242 | IXGB_CONTEXT_DESC_CMD_IP
1243 | IXGB_CONTEXT_DESC_CMD_TCP
1244 | IXGB_CONTEXT_DESC_CMD_IDE
1245 | (skb->len - (hdr_len)));
1248 if (++i == adapter->tx_ring.count) i = 0;
1249 adapter->tx_ring.next_to_use = i;
1258 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1260 struct ixgb_context_desc *context_desc;
1264 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1265 struct ixgb_buffer *buffer_info;
1266 css = skb_checksum_start_offset(skb);
1267 cso = css + skb->csum_offset;
1269 i = adapter->tx_ring.next_to_use;
1270 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1271 buffer_info = &adapter->tx_ring.buffer_info[i];
1272 WARN_ON(buffer_info->dma != 0);
1274 context_desc->tucss = css;
1275 context_desc->tucso = cso;
1276 context_desc->tucse = 0;
1277 /* zero out any previously existing data in one instruction */
1278 *(u32 *)&(context_desc->ipcss) = 0;
1279 context_desc->status = 0;
1280 context_desc->hdr_len = 0;
1281 context_desc->mss = 0;
1282 context_desc->cmd_type_len =
1283 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1284 | IXGB_TX_DESC_CMD_IDE);
1286 if (++i == adapter->tx_ring.count) i = 0;
1287 adapter->tx_ring.next_to_use = i;
1295 #define IXGB_MAX_TXD_PWR 14
1296 #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
1299 ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1302 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1303 struct pci_dev *pdev = adapter->pdev;
1304 struct ixgb_buffer *buffer_info;
1305 int len = skb_headlen(skb);
1306 unsigned int offset = 0, size, count = 0, i;
1307 unsigned int mss = skb_shinfo(skb)->gso_size;
1308 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1311 i = tx_ring->next_to_use;
1314 buffer_info = &tx_ring->buffer_info[i];
1315 size = min(len, IXGB_MAX_DATA_PER_TXD);
1316 /* Workaround for premature desc write-backs
1317 * in TSO mode. Append 4-byte sentinel desc */
if (unlikely(mss && !nr_frags && size == len && size > 8))
	size -= 4;
1321 buffer_info->length = size;
1322 WARN_ON(buffer_info->dma != 0);
1323 buffer_info->time_stamp = jiffies;
1324 buffer_info->mapped_as_page = false;
1325 buffer_info->dma = dma_map_single(&pdev->dev,
1327 size, DMA_TO_DEVICE);
1328 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1330 buffer_info->next_to_watch = 0;
1337 if (i == tx_ring->count)
1342 for (f = 0; f < nr_frags; f++) {
1343 const struct skb_frag_struct *frag;
1345 frag = &skb_shinfo(skb)->frags[f];
1346 len = skb_frag_size(frag);
1351 if (i == tx_ring->count)
1354 buffer_info = &tx_ring->buffer_info[i];
1355 size = min(len, IXGB_MAX_DATA_PER_TXD);
1357 /* Workaround for premature desc write-backs
1358 * in TSO mode. Append 4-byte sentinel desc */
1359 if (unlikely(mss && (f == (nr_frags - 1))
    && size == len && size > 8))
	size -= 4;
1363 buffer_info->length = size;
1364 buffer_info->time_stamp = jiffies;
1365 buffer_info->mapped_as_page = true;
1367 skb_frag_dma_map(&pdev->dev, frag, offset, size,
1369 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1371 buffer_info->next_to_watch = 0;
1378 tx_ring->buffer_info[i].skb = skb;
1379 tx_ring->buffer_info[first].next_to_watch = i;
1384 dev_err(&pdev->dev, "TX DMA map failed\n");
1385 buffer_info->dma = 0;
1391 i += tx_ring->count;
1393 buffer_info = &tx_ring->buffer_info[i];
1394 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1401 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1403 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1404 struct ixgb_tx_desc *tx_desc = NULL;
1405 struct ixgb_buffer *buffer_info;
1406 u32 cmd_type_len = adapter->tx_cmd_type;
1411 if (tx_flags & IXGB_TX_FLAGS_TSO) {
1412 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1413 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1416 if (tx_flags & IXGB_TX_FLAGS_CSUM)
1417 popts |= IXGB_TX_DESC_POPTS_TXSM;
1419 if (tx_flags & IXGB_TX_FLAGS_VLAN)
1420 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1422 i = tx_ring->next_to_use;
1425 buffer_info = &tx_ring->buffer_info[i];
1426 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1427 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1428 tx_desc->cmd_type_len =
1429 cpu_to_le32(cmd_type_len | buffer_info->length);
1430 tx_desc->status = status;
1431 tx_desc->popts = popts;
1432 tx_desc->vlan = cpu_to_le16(vlan_id);
1434 if (++i == tx_ring->count) i = 0;
1437 tx_desc->cmd_type_len |=
1438 cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
1440 /* Force memory writes to complete before letting h/w
1441 * know there are new descriptors to fetch. (Only
1442 * applicable for weak-ordered memory model archs,
 * such as IA-64). */
wmb();
1446 tx_ring->next_to_use = i;
1447 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1450 static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
1452 struct ixgb_adapter *adapter = netdev_priv(netdev);
1453 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1455 netif_stop_queue(netdev);
1456 /* Herbert's original patch had:
1457 * smp_mb__after_netif_stop_queue();
 * but since that doesn't exist yet, just open code it. */
smp_mb();

/* We need to check again in case another CPU has just
 * made room available. */
1463 if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
1467 netif_start_queue(netdev);
1468 ++adapter->restart_queue;
1472 static int ixgb_maybe_stop_tx(struct net_device *netdev,
1473 struct ixgb_desc_ring *tx_ring, int size)
1475 if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
1477 return __ixgb_maybe_stop_tx(netdev, size);
1481 /* Tx Descriptors needed, worst case */
1482 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1483 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
1485 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
1486 + 1 /* one more needed for sentinel TSO workaround */
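/* A worked example of the descriptor budget above (PAGE_SIZE and
 * MAX_SKB_FRAGS are architecture-dependent; the values here are only
 * illustrative): with 4 KiB pages, TXD_USE_COUNT(PAGE_SIZE) == 1 and
 * TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) == 1, so with MAX_SKB_FRAGS == 17
 * the worst case is 1 (head) + 17 (frags) + 1 (context) + 1 (sentinel) == 20
 * descriptors for a single skb. */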
1489 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1491 struct ixgb_adapter *adapter = netdev_priv(netdev);
1493 unsigned int tx_flags = 0;
1498 if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1499 dev_kfree_skb_any(skb);
1500 return NETDEV_TX_OK;
1503 if (skb->len <= 0) {
1504 dev_kfree_skb_any(skb);
1505 return NETDEV_TX_OK;
1508 if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1510 return NETDEV_TX_BUSY;
1512 if (skb_vlan_tag_present(skb)) {
1513 tx_flags |= IXGB_TX_FLAGS_VLAN;
1514 vlan_id = skb_vlan_tag_get(skb);
1517 first = adapter->tx_ring.next_to_use;
1519 tso = ixgb_tso(adapter, skb);
1521 dev_kfree_skb_any(skb);
1522 return NETDEV_TX_OK;
1526 tx_flags |= IXGB_TX_FLAGS_TSO;
1527 else if (ixgb_tx_csum(adapter, skb))
1528 tx_flags |= IXGB_TX_FLAGS_CSUM;
1530 count = ixgb_tx_map(adapter, skb, first);
1533 ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1534 /* Make sure there is space in the ring for the next send. */
1535 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1538 dev_kfree_skb_any(skb);
1539 adapter->tx_ring.buffer_info[first].time_stamp = 0;
1540 adapter->tx_ring.next_to_use = first;
1543 return NETDEV_TX_OK;
1547 * ixgb_tx_timeout - Respond to a Tx Hang
1548 * @netdev: network interface device structure
1552 ixgb_tx_timeout(struct net_device *netdev)
1554 struct ixgb_adapter *adapter = netdev_priv(netdev);
1556 /* Do the reset outside of interrupt context */
1557 schedule_work(&adapter->tx_timeout_task);
1561 ixgb_tx_timeout_task(struct work_struct *work)
1563 struct ixgb_adapter *adapter =
1564 container_of(work, struct ixgb_adapter, tx_timeout_task);
1566 adapter->tx_timeout_count++;
1567 ixgb_down(adapter, true);
1572 * ixgb_change_mtu - Change the Maximum Transfer Unit
1573 * @netdev: network interface device structure
1574 * @new_mtu: new value for maximum frame size
1576 * Returns 0 on success, negative on failure
1580 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1582 struct ixgb_adapter *adapter = netdev_priv(netdev);
1583 int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1585 if (netif_running(netdev))
1586 ixgb_down(adapter, true);
1588 adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1590 netdev->mtu = new_mtu;
if (netif_running(netdev))
	ixgb_up(adapter);

return 0;
}
1599 * ixgb_update_stats - Update the board statistics counters.
1600 * @adapter: board private structure
1604 ixgb_update_stats(struct ixgb_adapter *adapter)
1606 struct net_device *netdev = adapter->netdev;
1607 struct pci_dev *pdev = adapter->pdev;
1609 /* Prevent stats update while adapter is being reset */
1610 if (pci_channel_offline(pdev))
1613 if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1614 (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1615 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1616 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1617 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1618 u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1620 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1621 /* fix up multicast stats by removing broadcasts */
1625 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1626 adapter->stats.mprch += (multi >> 32);
1627 adapter->stats.bprcl += bcast_l;
1628 adapter->stats.bprch += bcast_h;
1630 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1631 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1632 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1633 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1635 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1636 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1637 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1638 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1639 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1640 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1641 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1642 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1643 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1644 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1645 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1646 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1647 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1648 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1649 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1650 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1651 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1652 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1653 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1654 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1655 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1656 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1657 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1658 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1659 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1660 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1661 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1662 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1663 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1664 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1665 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1666 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1667 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1668 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1669 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1670 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1671 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1672 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1673 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1674 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1675 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1676 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1677 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1678 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1679 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1680 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1681 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1682 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1683 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1684 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1685 adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1686 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1687 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1688 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1689 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1690 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
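/* Most of the packet/octet counters above are 64-bit hardware values split
 * across low/high register pairs, which is why each statistic accumulates
 * both a *L and a *H read. */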
1692 /* Fill out the OS statistics structure */
1694 netdev->stats.rx_packets = adapter->stats.gprcl;
1695 netdev->stats.tx_packets = adapter->stats.gptcl;
1696 netdev->stats.rx_bytes = adapter->stats.gorcl;
1697 netdev->stats.tx_bytes = adapter->stats.gotcl;
1698 netdev->stats.multicast = adapter->stats.mprcl;
1699 netdev->stats.collisions = 0;
1701 /* ignore RLEC as it reports errors for padded (<64bytes) frames
1702 * with a length in the type/len field */
1703 netdev->stats.rx_errors =
1704 /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1705 adapter->stats.ruc +
1706 adapter->stats.roc /*+ adapter->stats.rlec */ +
1707 adapter->stats.icbc +
1708 adapter->stats.ecbc + adapter->stats.mpc;
/* intentionally not counted, see the RLEC note above:
 * netdev->stats.rx_length_errors = adapter->stats.rlec;
 */
1714 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1715 netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1716 netdev->stats.rx_missed_errors = adapter->stats.mpc;
1717 netdev->stats.rx_over_errors = adapter->stats.mpc;
1719 netdev->stats.tx_errors = 0;
1720 netdev->stats.rx_frame_errors = 0;
1721 netdev->stats.tx_aborted_errors = 0;
1722 netdev->stats.tx_carrier_errors = 0;
1723 netdev->stats.tx_fifo_errors = 0;
1724 netdev->stats.tx_heartbeat_errors = 0;
1725 netdev->stats.tx_window_errors = 0;
1728 #define IXGB_MAX_INTR 10
1730 * ixgb_intr - Interrupt Handler
1731 * @irq: interrupt number
1732 * @data: pointer to a network interface device structure
1736 ixgb_intr(int irq, void *data)
1738 struct net_device *netdev = data;
1739 struct ixgb_adapter *adapter = netdev_priv(netdev);
1740 struct ixgb_hw *hw = &adapter->hw;
1741 u32 icr = IXGB_READ_REG(hw, ICR);
1744 return IRQ_NONE; /* Not our interrupt */
1746 if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1747 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1748 mod_timer(&adapter->watchdog_timer, jiffies);
1750 if (napi_schedule_prep(&adapter->napi)) {
/* Disable interrupts and register for poll.  The flush
 * of the posted write is intentionally left out. */
1756 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1757 __napi_schedule(&adapter->napi);
1763 * ixgb_clean - NAPI Rx polling callback
 * @napi: NAPI context, embedded in the board private structure
 * @budget: maximum number of receive packets to process in this poll
1768 ixgb_clean(struct napi_struct *napi, int budget)
1770 struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1773 ixgb_clean_tx_irq(adapter);
1774 ixgb_clean_rx_irq(adapter, &work_done, budget);
1776 /* If budget not fully consumed, exit the polling mode */
1777 if (work_done < budget) {
1778 napi_complete_done(napi, work_done);
1779 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1780 ixgb_irq_enable(adapter);
1787 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1788 * @adapter: board private structure
1792 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1794 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1795 struct net_device *netdev = adapter->netdev;
1796 struct ixgb_tx_desc *tx_desc, *eop_desc;
1797 struct ixgb_buffer *buffer_info;
1798 unsigned int i, eop;
1799 bool cleaned = false;
1801 i = tx_ring->next_to_clean;
1802 eop = tx_ring->buffer_info[i].next_to_watch;
1803 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1805 while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1807 rmb(); /* read buffer_info after eop_desc */
1808 for (cleaned = false; !cleaned; ) {
1809 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1810 buffer_info = &tx_ring->buffer_info[i];
1812 if (tx_desc->popts &
1813 (IXGB_TX_DESC_POPTS_TXSM |
1814 IXGB_TX_DESC_POPTS_IXSM))
1815 adapter->hw_csum_tx_good++;
1817 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1819 *(u32 *)&(tx_desc->status) = 0;
1821 cleaned = (i == eop);
1822 if (++i == tx_ring->count) i = 0;
1825 eop = tx_ring->buffer_info[i].next_to_watch;
1826 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1829 tx_ring->next_to_clean = i;
1831 if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1832 IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1833 /* Make sure that anybody stopping the queue after this
1834 * sees the new next_to_clean. */
1837 if (netif_queue_stopped(netdev) &&
1838 !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1839 netif_wake_queue(netdev);
1840 ++adapter->restart_queue;
1844 if (adapter->detect_tx_hung) {
1845 /* detect a transmit hang in hardware, this serializes the
1846 * check with the clearing of time_stamp and movement of i */
1847 adapter->detect_tx_hung = false;
1848 if (tx_ring->buffer_info[eop].time_stamp &&
1849 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1850 && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1851 IXGB_STATUS_TXOFF)) {
1852 /* detected Tx unit hang */
1853 netif_err(adapter, drv, adapter->netdev,
1854 "Detected Tx Unit Hang\n"
1857 " next_to_use <%x>\n"
1858 " next_to_clean <%x>\n"
1859 "buffer_info[next_to_clean]\n"
1860 " time_stamp <%lx>\n"
1861 " next_to_watch <%x>\n"
1863 " next_to_watch.status <%x>\n",
1864 IXGB_READ_REG(&adapter->hw, TDH),
1865 IXGB_READ_REG(&adapter->hw, TDT),
1866 tx_ring->next_to_use,
1867 tx_ring->next_to_clean,
1868 tx_ring->buffer_info[eop].time_stamp,
1872 netif_stop_queue(netdev);
1880 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1881 * @adapter: board private structure
1882 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
1887 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1888 struct ixgb_rx_desc *rx_desc,
1889 struct sk_buff *skb)
/* Ignore Checksum bit is set OR
 * TCP Checksum has not been calculated
 */
1894 if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1895 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1896 skb_checksum_none_assert(skb);
1900 /* At this point we know the hardware did the TCP checksum */
1901 /* now look at the TCP checksum error bit */
1902 if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1903 /* let the stack verify checksum errors */
1904 skb_checksum_none_assert(skb);
1905 adapter->hw_csum_rx_error++;
1907 /* TCP checksum is good */
1908 skb->ip_summed = CHECKSUM_UNNECESSARY;
1909 adapter->hw_csum_rx_good++;
/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
1917 static void ixgb_check_copybreak(struct napi_struct *napi,
1918 struct ixgb_buffer *buffer_info,
1919 u32 length, struct sk_buff **skb)
1921 struct sk_buff *new_skb;
if (length > copybreak)
	return;

new_skb = napi_alloc_skb(napi, length);
if (!new_skb)
	return;
1930 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1931 (*skb)->data - NET_IP_ALIGN,
1932 length + NET_IP_ALIGN);
/* save the skb in buffer_info as good */
buffer_info->skb = *skb;
*skb = new_skb;
}
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: output, incremented for each packet handed to the stack
 * @work_to_do: NAPI budget, maximum number of packets to clean
1944 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1946 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1947 struct net_device *netdev = adapter->netdev;
1948 struct pci_dev *pdev = adapter->pdev;
1949 struct ixgb_rx_desc *rx_desc, *next_rxd;
1950 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1953 int cleaned_count = 0;
1954 bool cleaned = false;
1956 i = rx_ring->next_to_clean;
1957 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1958 buffer_info = &rx_ring->buffer_info[i];
1960 while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1961 struct sk_buff *skb;
1964 if (*work_done >= work_to_do)
1968 rmb(); /* read descriptor and rx_buffer_info after status DD */
1969 status = rx_desc->status;
1970 skb = buffer_info->skb;
1971 buffer_info->skb = NULL;
1973 prefetch(skb->data - NET_IP_ALIGN);
1975 if (++i == rx_ring->count)
1977 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1981 if (j == rx_ring->count)
1983 next2_buffer = &rx_ring->buffer_info[j];
1984 prefetch(next2_buffer);
1986 next_buffer = &rx_ring->buffer_info[i];
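/* the descriptor for the next iteration and the buffer_info two entries
 * ahead are looked up and prefetched here so they are already warm in the
 * cache when the loop reaches them */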
1991 dma_unmap_single(&pdev->dev,
1993 buffer_info->length,
1995 buffer_info->dma = 0;
1997 length = le16_to_cpu(rx_desc->length);
1998 rx_desc->length = 0;
2000 if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
2002 /* All receives must fit into a single buffer */
2004 pr_debug("Receive packet consumed multiple buffers length<%x>\n",
2007 dev_kfree_skb_irq(skb);
2011 if (unlikely(rx_desc->errors &
2012 (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2013 IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2014 dev_kfree_skb_irq(skb);
2018 ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
2021 skb_put(skb, length);
2023 /* Receive Checksum Offload */
2024 ixgb_rx_checksum(adapter, rx_desc, skb);
2026 skb->protocol = eth_type_trans(skb, netdev);
2027 if (status & IXGB_RX_DESC_STATUS_VP)
2028 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2029 le16_to_cpu(rx_desc->special));
2031 netif_receive_skb(skb);
2034 /* clean up descriptor, might be written over by hw */
2035 rx_desc->status = 0;
2037 /* return some buffers to hardware, one at a time is too slow */
2038 if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2039 ixgb_alloc_rx_buffers(adapter, cleaned_count);
2043 /* use prefetched values */
2045 buffer_info = next_buffer;
2048 rx_ring->next_to_clean = i;
2050 cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2052 ixgb_alloc_rx_buffers(adapter, cleaned_count);
2058 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: how many receive buffers to allocate
2063 ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2065 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2066 struct net_device *netdev = adapter->netdev;
2067 struct pci_dev *pdev = adapter->pdev;
2068 struct ixgb_rx_desc *rx_desc;
2069 struct ixgb_buffer *buffer_info;
2070 struct sk_buff *skb;
2074 i = rx_ring->next_to_use;
2075 buffer_info = &rx_ring->buffer_info[i];
2076 cleancount = IXGB_DESC_UNUSED(rx_ring);
2079 /* leave three descriptors unused */
2080 while (--cleancount > 2 && cleaned_count--) {
/* recycle! it's good for you */
2082 skb = buffer_info->skb;
2088 skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2089 if (unlikely(!skb)) {
2090 /* Better luck next round */
2091 adapter->alloc_rx_buff_failed++;
2095 buffer_info->skb = skb;
2096 buffer_info->length = adapter->rx_buffer_len;
2098 buffer_info->dma = dma_map_single(&pdev->dev,
2100 adapter->rx_buffer_len,
2102 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
2103 adapter->alloc_rx_buff_failed++;
2107 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2108 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2109 /* guarantee DD bit not set now before h/w gets descriptor
 * this is the rest of the workaround for h/w double
 * writeback. */
2112 rx_desc->status = 0;
2115 if (++i == rx_ring->count)
2117 buffer_info = &rx_ring->buffer_info[i];
2120 if (likely(rx_ring->next_to_use != i)) {
2121 rx_ring->next_to_use = i;
2122 if (unlikely(i-- == 0))
2123 i = (rx_ring->count - 1);
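/* the tail register is written one entry behind next_to_use: the
 * decrement above (wrapping to the end of the ring at zero) keeps the
 * hardware from owning the descriptor the driver will fill next */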
2125 /* Force memory writes to complete before letting h/w
2126 * know there are new descriptors to fetch. (Only
 * applicable for weak-ordered memory model archs,
 * such as IA-64). */
wmb();
2130 IXGB_WRITE_REG(&adapter->hw, RDT, i);
2135 ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2139 /* enable VLAN tag insert/strip */
2140 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2141 ctrl |= IXGB_CTRL0_VME;
2142 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2146 ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2150 /* disable VLAN tag insert/strip */
2151 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2152 ctrl &= ~IXGB_CTRL0_VME;
2153 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2157 ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2159 struct ixgb_adapter *adapter = netdev_priv(netdev);
2162 /* add VID to filter table */
2164 index = (vid >> 5) & 0x7F;
2165 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2166 vfta |= (1 << (vid & 0x1F));
2167 ixgb_write_vfta(&adapter->hw, index, vfta);
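/* the VLAN filter table is a 128 x 32-bit bitmap: bits 11:5 of the VID
 * select the VFTA word and bits 4:0 select the bit within it, covering all
 * 4096 possible VLAN IDs */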
2168 set_bit(vid, adapter->active_vlans);
2174 ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2176 struct ixgb_adapter *adapter = netdev_priv(netdev);
2179 /* remove VID from filter table */
2181 index = (vid >> 5) & 0x7F;
2182 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2183 vfta &= ~(1 << (vid & 0x1F));
2184 ixgb_write_vfta(&adapter->hw, index, vfta);
2185 clear_bit(vid, adapter->active_vlans);
2191 ixgb_restore_vlan(struct ixgb_adapter *adapter)
2195 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2196 ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2199 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
2202 * without having to re-enable interrupts. It's not called while
2203 * the interrupt routine is executing.
2206 static void ixgb_netpoll(struct net_device *dev)
2208 struct ixgb_adapter *adapter = netdev_priv(dev);
2210 disable_irq(adapter->pdev->irq);
2211 ixgb_intr(adapter->pdev->irq, dev);
2212 enable_irq(adapter->pdev->irq);
2217 * ixgb_io_error_detected - called when PCI error is detected
2218 * @pdev: pointer to pci device with error
2219 * @state: pci channel state after error
2221 * This callback is called by the PCI subsystem whenever
2222 * a PCI bus error is detected.
2224 static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2225 enum pci_channel_state state)
2227 struct net_device *netdev = pci_get_drvdata(pdev);
2228 struct ixgb_adapter *adapter = netdev_priv(netdev);
2230 netif_device_detach(netdev);
2232 if (state == pci_channel_io_perm_failure)
2233 return PCI_ERS_RESULT_DISCONNECT;
2235 if (netif_running(netdev))
2236 ixgb_down(adapter, true);
2238 pci_disable_device(pdev);
2240 /* Request a slot reset. */
2241 return PCI_ERS_RESULT_NEED_RESET;
2245 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
2248 * This callback is called after the PCI bus has been reset.
2249 * Basically, this tries to restart the card from scratch.
2250 * This is a shortened version of the device probe/discovery code,
2251 * it resembles the first-half of the ixgb_probe() routine.
2253 static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2255 struct net_device *netdev = pci_get_drvdata(pdev);
2256 struct ixgb_adapter *adapter = netdev_priv(netdev);
2258 if (pci_enable_device(pdev)) {
2259 netif_err(adapter, probe, adapter->netdev,
2260 "Cannot re-enable PCI device after reset\n");
2261 return PCI_ERS_RESULT_DISCONNECT;
2264 /* Perform card reset only on one instance of the card */
2265 if (0 != PCI_FUNC (pdev->devfn))
2266 return PCI_ERS_RESULT_RECOVERED;
2268 pci_set_master(pdev);
2270 netif_carrier_off(netdev);
2271 netif_stop_queue(netdev);
2272 ixgb_reset(adapter);
2274 /* Make sure the EEPROM is good */
2275 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2276 netif_err(adapter, probe, adapter->netdev,
2277 "After reset, the EEPROM checksum is not valid\n");
2278 return PCI_ERS_RESULT_DISCONNECT;
2280 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2281 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2283 if (!is_valid_ether_addr(netdev->perm_addr)) {
2284 netif_err(adapter, probe, adapter->netdev,
2285 "After reset, invalid MAC address\n");
2286 return PCI_ERS_RESULT_DISCONNECT;
2289 return PCI_ERS_RESULT_RECOVERED;
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
2297 * normal operation. Implementation resembles the second-half
2298 * of the ixgb_probe() routine.
2300 static void ixgb_io_resume(struct pci_dev *pdev)
2302 struct net_device *netdev = pci_get_drvdata(pdev);
2303 struct ixgb_adapter *adapter = netdev_priv(netdev);
2305 pci_set_master(pdev);
2307 if (netif_running(netdev)) {
2308 if (ixgb_up(adapter)) {
2309 pr_err("can't bring device back up after reset\n");
2314 netif_device_attach(netdev);
2315 mod_timer(&adapter->watchdog_timer, jiffies);