2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
54 static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER | */
61 /* NETIF_MSG_TX_QUEUED | */
62 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 static int debug = -1; /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80 "Option to enable MPI firmware dump. "
81 "Default is OFF - Do Not allocate memory. ");
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86 "Option to allow force of firmware core dump. "
87 "Default is OFF - Do not allow.");
89 static const struct pci_device_id qlge_pci_tbl[] = {
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92 /* required last entry */
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 static int ql_wol(struct ql_adapter *);
99 static void qlge_set_multicast_list(struct net_device *);
100 static int ql_adapter_down(struct ql_adapter *);
101 static int ql_adapter_up(struct ql_adapter *);
103 /* This hardware semaphore causes exclusive access to
104 * resources shared between the NIC driver, MPI firmware,
105 * FCOE firmware and the FC driver.
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
112 case SEM_XGMAC0_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
115 case SEM_XGMAC1_MASK:
116 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
119 sem_bits = SEM_SET << SEM_ICB_SHIFT;
121 case SEM_MAC_ADDR_MASK:
122 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
125 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
128 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
130 case SEM_RT_IDX_MASK:
131 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
133 case SEM_PROC_REG_MASK:
134 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
137 netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
141 ql_write32(qdev, SEM, sem_bits | sem_mask);
142 return !(ql_read32(qdev, SEM) & sem_bits);
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
147 unsigned int wait_count = 30;
149 if (!ql_sem_trylock(qdev, sem_mask))
152 } while (--wait_count);
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
158 ql_write32(qdev, SEM, sem_mask);
159 ql_read32(qdev, SEM); /* flush */
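/* Example (illustrative sketch, mirroring how these helpers are used further
 * down in this file): callers bracket access to a shared hardware resource
 * with the semaphore helpers above, e.g. when touching the MAC address CAM:
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	status = ql_set_mac_addr_reg(qdev, addr, MAC_ADDR_TYPE_CAM_MAC,
 *				     qdev->func * MAX_CQ);
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */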
162 /* This function waits for a specific bit to come ready
163 * in a given register. It is used mostly by the initialize
164 * process, but is also used in kernel thread APIs such as
165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
170 int count = UDELAY_COUNT;
173 temp = ql_read32(qdev, reg);
175 /* check for errors */
176 if (temp & err_bit) {
177 netif_alert(qdev, probe, qdev->ndev,
178 "register 0x%.08x access error, value = 0x%.08x!.\n",
181 } else if (temp & bit)
183 udelay(UDELAY_DELAY);
186 netif_alert(qdev, probe, qdev->ndev,
187 "Timed out waiting for reg %x to come ready.\n", reg);
191 /* The CFG register is used to download TX and RX control blocks
192 * to the chip. This function waits for an operation to complete.
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
196 int count = UDELAY_COUNT;
200 temp = ql_read32(qdev, CFG);
205 udelay(UDELAY_DELAY);
212 /* Used to issue init control blocks to hw. Maps control block,
213 * sets address, triggers download, waits for completion.
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
225 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
228 map = pci_map_single(qdev->pdev, ptr, size, direction);
229 if (pci_dma_mapping_error(qdev->pdev, map)) {
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
238 status = ql_wait_cfg(qdev, bit);
240 netif_err(qdev, ifup, qdev->ndev,
241 "Timed out waiting for CFG to come ready.\n");
245 ql_write32(qdev, ICB_L, (u32) map);
246 ql_write32(qdev, ICB_H, (u32) (map >> 32));
248 mask = CFG_Q_MASK | (bit << 16);
249 value = bit | (q_id << CFG_Q_SHIFT);
250 ql_write32(qdev, CFG, (mask | value));
253 * Wait for the bit to clear after signaling hw.
255 status = ql_wait_cfg(qdev, bit);
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
258 pci_unmap_single(qdev->pdev, map, size, direction);
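/* Example (hypothetical sketch, not a call shown in this excerpt): downloading
 * a completion queue init control block, assuming the struct cqicb type
 * declared in qlge.h, for queue cq_id would look like:
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ, cq_id);
 *	if (status)
 *		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
 */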
262 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
270 case MAC_ADDR_TYPE_MULTI_MAC:
271 case MAC_ADDR_TYPE_CAM_MAC:
274 ql_wait_reg_rdy(qdev,
275 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
296 ql_wait_reg_rdy(qdev,
297 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 if (type == MAC_ADDR_TYPE_CAM_MAC) {
303 ql_wait_reg_rdy(qdev,
304 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
319 case MAC_ADDR_TYPE_VLAN:
320 case MAC_ADDR_TYPE_MULTI_FLTR:
322 netif_crit(qdev, ifup, qdev->ndev,
323 "Address type %d not yet supported.\n", type);
330 /* Set up a MAC, multicast or VLAN address for the
331 * inbound frame matching.
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
340 case MAC_ADDR_TYPE_MULTI_MAC:
342 u32 upper = (addr[0] << 8) | addr[1];
343 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 (addr[4] << 8) | (addr[5]);
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 (index << MAC_ADDR_IDX_SHIFT) |
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 (index << MAC_ADDR_IDX_SHIFT) |
364 ql_write32(qdev, MAC_ADDR_DATA, upper);
366 ql_wait_reg_rdy(qdev,
367 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
372 case MAC_ADDR_TYPE_CAM_MAC:
375 u32 upper = (addr[0] << 8) | addr[1];
377 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
380 ql_wait_reg_rdy(qdev,
381 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
384 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
385 (index << MAC_ADDR_IDX_SHIFT) | /* index */
387 ql_write32(qdev, MAC_ADDR_DATA, lower);
389 ql_wait_reg_rdy(qdev,
390 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
393 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
394 (index << MAC_ADDR_IDX_SHIFT) | /* index */
396 ql_write32(qdev, MAC_ADDR_DATA, upper);
398 ql_wait_reg_rdy(qdev,
399 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
402 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
403 (index << MAC_ADDR_IDX_SHIFT) | /* index */
405 /* This field should also include the queue id
406 and possibly the function id. Right now we hardcode
407 the route field to NIC core.
409 cam_output = (CAM_OUT_ROUTE_NIC |
411 func << CAM_OUT_FUNC_SHIFT) |
412 (0 << CAM_OUT_CQ_ID_SHIFT));
413 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
414 cam_output |= CAM_OUT_RV;
415 /* route to NIC core */
416 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
419 case MAC_ADDR_TYPE_VLAN:
421 u32 enable_bit = *((u32 *) &addr[0]);
422 /* For VLAN, the addr actually holds a bit that
423 * either enables or disables the vlan id we are
424 * addressing. It's either MAC_ADDR_E on or off.
425 * That's bit-27 we're talking about.
428 ql_wait_reg_rdy(qdev,
429 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
432 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
433 (index << MAC_ADDR_IDX_SHIFT) | /* index */
435 enable_bit); /* enable/disable */
438 case MAC_ADDR_TYPE_MULTI_FLTR:
440 netif_crit(qdev, ifup, qdev->ndev,
441 "Address type %d not yet supported.\n", type);
448 /* Set or clear MAC address in hardware. We sometimes
449 * have to clear it to prevent wrong frame routing
450 * especially in a bonding environment.
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
455 char zero_mac_addr[ETH_ALEN];
459 addr = &qdev->current_mac_addr[0];
460 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
461 "Set Mac addr %pM\n", addr);
463 eth_zero_addr(zero_mac_addr);
464 addr = &zero_mac_addr[0];
465 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
466 "Clearing MAC address\n");
468 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
471 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
472 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
473 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
475 netif_err(qdev, ifup, qdev->ndev,
476 "Failed to init mac address.\n");
480 void ql_link_on(struct ql_adapter *qdev)
482 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
483 netif_carrier_on(qdev->ndev);
484 ql_set_mac_addr(qdev, 1);
487 void ql_link_off(struct ql_adapter *qdev)
489 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
490 netif_carrier_off(qdev->ndev);
491 ql_set_mac_addr(qdev, 0);
494 /* Get a specific frame routing value from the CAM.
495 * Used for debug and reg dump.
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
501 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
505 ql_write32(qdev, RT_IDX,
506 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
507 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
510 *value = ql_read32(qdev, RT_DATA);
515 /* The NIC function for this chip has 16 routing indexes. Each one can be used
516 * to route different frame types to various inbound queues. We send broadcast/
517 * multicast/error frames to the default queue for slow handling,
518 * and CAM hit/RSS frames to the fast handling queues.
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
523 int status = -EINVAL; /* Return error if no mask match. */
529 value = RT_IDX_DST_CAM_Q | /* dest */
530 RT_IDX_TYPE_NICQ | /* type */
531 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
534 case RT_IDX_VALID: /* Promiscuous Mode frames. */
536 value = RT_IDX_DST_DFLT_Q | /* dest */
537 RT_IDX_TYPE_NICQ | /* type */
538 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
541 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
543 value = RT_IDX_DST_DFLT_Q | /* dest */
544 RT_IDX_TYPE_NICQ | /* type */
545 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
548 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
550 value = RT_IDX_DST_DFLT_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_IP_CSUM_ERR_SLOT <<
553 RT_IDX_IDX_SHIFT); /* index */
556 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
558 value = RT_IDX_DST_DFLT_Q | /* dest */
559 RT_IDX_TYPE_NICQ | /* type */
560 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
561 RT_IDX_IDX_SHIFT); /* index */
564 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
566 value = RT_IDX_DST_DFLT_Q | /* dest */
567 RT_IDX_TYPE_NICQ | /* type */
568 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
571 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
573 value = RT_IDX_DST_DFLT_Q | /* dest */
574 RT_IDX_TYPE_NICQ | /* type */
575 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
578 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
580 value = RT_IDX_DST_DFLT_Q | /* dest */
581 RT_IDX_TYPE_NICQ | /* type */
582 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
585 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
587 value = RT_IDX_DST_RSS | /* dest */
588 RT_IDX_TYPE_NICQ | /* type */
589 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
592 case 0: /* Clear the E-bit on an entry. */
594 value = RT_IDX_DST_DFLT_Q | /* dest */
595 RT_IDX_TYPE_NICQ | /* type */
596 (index << RT_IDX_IDX_SHIFT);/* index */
600 netif_err(qdev, ifup, qdev->ndev,
601 "Mask type %d not yet supported.\n", mask);
607 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
610 value |= (enable ? RT_IDX_E : 0);
611 ql_write32(qdev, RT_IDX, value);
612 ql_write32(qdev, RT_DATA, enable ? mask : 0);
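/* Example (illustrative, hedged): with the routing-index semaphore held
 * (SEM_RT_IDX_MASK), enabling slow-path delivery of broadcast frames uses the
 * broadcast slot handled above:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *	if (status)
 *		netif_err(qdev, ifup, qdev->ndev,
 *			  "Failed to set routing register for broadcast frames.\n");
 */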
618 static void ql_enable_interrupts(struct ql_adapter *qdev)
620 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
623 static void ql_disable_interrupts(struct ql_adapter *qdev)
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
628 /* If we're running with multiple MSI-X vectors then we enable on the fly.
629 * Otherwise, we may have multiple outstanding workers and don't want to
630 * enable until the last one finishes. In this case, the irq_cnt gets
631 * incremented every time we queue a worker and decremented every time
632 * a worker finishes. Once it hits zero we enable the interrupt.
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
637 unsigned long hw_flags = 0;
638 struct intr_context *ctx = qdev->intr_context + intr;
640 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
641 /* Always enable if we're MSIX multi interrupts and
642 * it's not the default (zeroeth) interrupt.
644 ql_write32(qdev, INTR_EN,
646 var = ql_read32(qdev, STS);
650 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
651 if (atomic_dec_and_test(&ctx->irq_cnt)) {
652 ql_write32(qdev, INTR_EN,
654 var = ql_read32(qdev, STS);
656 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
663 struct intr_context *ctx;
665 /* HW disables for us if we're MSIX multi interrupts and
666 * it's not the default (zeroeth) interrupt.
668 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
671 ctx = qdev->intr_context + intr;
672 spin_lock(&qdev->hw_lock);
673 if (!atomic_read(&ctx->irq_cnt)) {
674 ql_write32(qdev, INTR_EN,
676 var = ql_read32(qdev, STS);
678 atomic_inc(&ctx->irq_cnt);
679 spin_unlock(&qdev->hw_lock);
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
686 for (i = 0; i < qdev->intr_count; i++) {
687 /* The enable call does an atomic_dec_and_test
688 * and enables only if the result is zero.
689 * So we precharge it here.
691 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
693 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
694 ql_enable_completion_interrupt(qdev, i);
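/* Example (illustrative, for the default/legacy vector case described above):
 * a handler that defers work to the workqueue pairs a disable with a later
 * enable, so the vector is only unmasked once the last outstanding worker
 * finishes (the work item name below is hypothetical):
 *
 *	ql_disable_completion_interrupt(qdev, 0);	// irq_cnt++
 *	queue_delayed_work(qdev->workqueue, &some_work, 0);
 *	...
 *	// in the worker, once processing is complete:
 *	ql_enable_completion_interrupt(qdev, 0);	// unmasks at irq_cnt == 0
 */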
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
703 __le16 *flash = (__le16 *)&qdev->flash;
705 status = strncmp((char *)&qdev->flash, str, 4);
707 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
711 for (i = 0; i < size; i++)
712 csum += le16_to_cpu(*flash++);
715 netif_err(qdev, ifup, qdev->ndev,
716 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
724 /* wait for reg to come ready */
725 status = ql_wait_reg_rdy(qdev,
726 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
729 /* set up for reg read */
730 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
731 /* wait for reg to come ready */
732 status = ql_wait_reg_rdy(qdev,
733 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
736 /* This data is stored on flash as an array of
737 * __le32. Since ql_read32() returns cpu endian
738 * we need to swap it back.
740 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
749 __le32 *p = (__le32 *)&qdev->flash;
753 /* Get flash offset for function and adjust
757 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
759 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
761 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
764 size = sizeof(struct flash_params_8000) / sizeof(u32);
765 for (i = 0; i < size; i++, p++) {
766 status = ql_read_flash_word(qdev, i+offset, p);
768 netif_err(qdev, ifup, qdev->ndev,
769 "Error reading flash.\n");
774 status = ql_validate_flash(qdev,
775 sizeof(struct flash_params_8000) / sizeof(u16),
778 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
783 /* Extract either manufacturer or BOFM modified
786 if (qdev->flash.flash_params_8000.data_type1 == 2)
788 qdev->flash.flash_params_8000.mac_addr1,
789 qdev->ndev->addr_len);
792 qdev->flash.flash_params_8000.mac_addr,
793 qdev->ndev->addr_len);
795 if (!is_valid_ether_addr(mac_addr)) {
796 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
801 memcpy(qdev->ndev->dev_addr,
803 qdev->ndev->addr_len);
806 ql_sem_unlock(qdev, SEM_FLASH_MASK);
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
814 __le32 *p = (__le32 *)&qdev->flash;
816 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
818 /* Second function's parameters follow the first
824 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
827 for (i = 0; i < size; i++, p++) {
828 status = ql_read_flash_word(qdev, i+offset, p);
830 netif_err(qdev, ifup, qdev->ndev,
831 "Error reading flash.\n");
837 status = ql_validate_flash(qdev,
838 sizeof(struct flash_params_8012) / sizeof(u16),
841 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
846 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
851 memcpy(qdev->ndev->dev_addr,
852 qdev->flash.flash_params_8012.mac_addr,
853 qdev->ndev->addr_len);
856 ql_sem_unlock(qdev, SEM_FLASH_MASK);
860 /* xgmac registers are located behind the xgmac_addr and xgmac_data
861 * register pair. Each read/write requires us to wait for the ready
862 * bit before reading/writing the data.
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
867 /* wait for reg to come ready */
868 status = ql_wait_reg_rdy(qdev,
869 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
872 /* write the data to the data reg */
873 ql_write32(qdev, XGMAC_DATA, data);
874 /* trigger the write */
875 ql_write32(qdev, XGMAC_ADDR, reg);
879 /* xgmac registers are located behind the xgmac_addr and xgmac_data
880 * register pair. Each read/write requires us to wait for the ready
881 * bit before reading/writing the data.
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
886 /* wait for reg to come ready */
887 status = ql_wait_reg_rdy(qdev,
888 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
891 /* set up for reg read */
892 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
893 /* wait for reg to come ready */
894 status = ql_wait_reg_rdy(qdev,
895 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899 *data = ql_read32(qdev, XGMAC_DATA);
904 /* This is used for reading the 64-bit statistics regs. */
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
911 status = ql_read_xgmac_reg(qdev, reg, &lo);
915 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919 *data = (u64) lo | ((u64) hi << 32);
925 static int ql_8000_port_initialize(struct ql_adapter *qdev)
929 * Get MPI firmware version for driver banner
932 status = ql_mb_about_fw(qdev);
935 status = ql_mb_get_fw_state(qdev);
938 /* Wake up a worker to get/set the TX/RX frame sizes. */
939 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
944 /* Take the MAC Core out of reset.
945 * Enable statistics counting.
946 * Take the transmitter/receiver out of reset.
947 * This functionality may be done in the MPI firmware at a later date.
950 static int ql_8012_port_initialize(struct ql_adapter *qdev)
955 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
956 /* Another function has the semaphore, so
957 * wait for the port init bit to come ready.
959 netif_info(qdev, link, qdev->ndev,
960 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
961 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
963 netif_crit(qdev, link, qdev->ndev,
964 "Port initialize timed out.\n");
969 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
970 /* Set the core reset. */
971 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
974 data |= GLOBAL_CFG_RESET;
975 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979 /* Clear the core reset and turn on jumbo for receiver. */
980 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
981 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
982 data |= GLOBAL_CFG_TX_STAT_EN;
983 data |= GLOBAL_CFG_RX_STAT_EN;
984 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988 /* Enable the transmitter and clear its reset. */
989 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
992 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
993 data |= TX_CFG_EN; /* Enable the transmitter. */
994 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998 /* Enable the receiver and clear its reset. */
999 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1002 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1003 data |= RX_CFG_EN; /* Enable the receiver. */
1004 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008 /* Turn on jumbo. */
1010 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018 /* Signal to the world that the port is enabled. */
1019 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1021 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1027 return PAGE_SIZE << qdev->lbq_buf_order;
1030 /* Get the next large buffer. */
1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034 rx_ring->lbq_curr_idx++;
1035 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036 rx_ring->lbq_curr_idx = 0;
1037 rx_ring->lbq_free_cnt++;
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042 struct rx_ring *rx_ring)
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1046 pci_dma_sync_single_for_cpu(qdev->pdev,
1047 dma_unmap_addr(lbq_desc, mapaddr),
1048 rx_ring->lbq_buf_size,
1049 PCI_DMA_FROMDEVICE);
1051 /* If it's the last chunk of our master page then unmap the whole page. */
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055 == ql_lbq_block_size(qdev))
1056 pci_unmap_page(qdev->pdev,
1057 lbq_desc->p.pg_chunk.map,
1058 ql_lbq_block_size(qdev),
1059 PCI_DMA_FROMDEVICE);
1063 /* Get the next small buffer. */
1064 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1066 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067 rx_ring->sbq_curr_idx++;
1068 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069 rx_ring->sbq_curr_idx = 0;
1070 rx_ring->sbq_free_cnt++;
1074 /* Update an rx ring index. */
1075 static void ql_update_cq(struct rx_ring *rx_ring)
1077 rx_ring->cnsmr_idx++;
1078 rx_ring->curr_entry++;
1079 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080 rx_ring->cnsmr_idx = 0;
1081 rx_ring->curr_entry = rx_ring->cq_base;
1085 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1087 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091 struct bq_desc *lbq_desc)
1093 if (!rx_ring->pg_chunk.page) {
1095 rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
1096 qdev->lbq_buf_order);
1097 if (unlikely(!rx_ring->pg_chunk.page)) {
1098 netif_err(qdev, drv, qdev->ndev,
1099 "page allocation failed.\n");
1102 rx_ring->pg_chunk.offset = 0;
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 0, ql_lbq_block_size(qdev),
1105 PCI_DMA_FROMDEVICE);
1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order);
1109 rx_ring->pg_chunk.page = NULL;
1110 netif_err(qdev, drv, qdev->ndev,
1111 "PCI mapping failed.\n");
1114 rx_ring->pg_chunk.map = map;
1115 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1118 /* Copy the current master pg_chunk info
1119 * to the current descriptor.
1121 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1123 /* Adjust the master page chunk for the next buffer get. */
1126 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1127 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1128 rx_ring->pg_chunk.page = NULL;
1129 lbq_desc->p.pg_chunk.last_flag = 1;
1131 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1132 get_page(rx_ring->pg_chunk.page);
1133 lbq_desc->p.pg_chunk.last_flag = 0;
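/* Worked example of the chunk carving above (values are illustrative): with
 * 4 KB pages, an lbq_buf_size of 2048 and lbq_buf_order of 0, the master
 * block is PAGE_SIZE << 0 = 4096 bytes and yields two chunks at offsets 0 and
 * 2048.  After handing out the second chunk the offset reaches
 * ql_lbq_block_size(), so pg_chunk.page is dropped from the ring state and
 * last_flag is set; otherwise the page reference count is bumped and the
 * offset simply advances by one buffer.
 */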
1137 /* Process (refill) a large buffer queue. */
1138 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1140 u32 clean_idx = rx_ring->lbq_clean_idx;
1141 u32 start_idx = clean_idx;
1142 struct bq_desc *lbq_desc;
1146 while (rx_ring->lbq_free_cnt > 32) {
1147 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1148 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1149 "lbq: try cleaning clean_idx = %d.\n",
1151 lbq_desc = &rx_ring->lbq[clean_idx];
1152 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1153 rx_ring->lbq_clean_idx = clean_idx;
1154 netif_err(qdev, ifup, qdev->ndev,
1155 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1160 map = lbq_desc->p.pg_chunk.map +
1161 lbq_desc->p.pg_chunk.offset;
1162 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1163 dma_unmap_len_set(lbq_desc, maplen,
1164 rx_ring->lbq_buf_size);
1165 *lbq_desc->addr = cpu_to_le64(map);
1167 pci_dma_sync_single_for_device(qdev->pdev, map,
1168 rx_ring->lbq_buf_size,
1169 PCI_DMA_FROMDEVICE);
1171 if (clean_idx == rx_ring->lbq_len)
1175 rx_ring->lbq_clean_idx = clean_idx;
1176 rx_ring->lbq_prod_idx += 16;
1177 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1178 rx_ring->lbq_prod_idx = 0;
1179 rx_ring->lbq_free_cnt -= 16;
1182 if (start_idx != clean_idx) {
1183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1184 "lbq: updating prod idx = %d.\n",
1185 rx_ring->lbq_prod_idx);
1186 ql_write_db_reg(rx_ring->lbq_prod_idx,
1187 rx_ring->lbq_prod_idx_db_reg);
1191 /* Process (refill) a small buffer queue. */
1192 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194 u32 clean_idx = rx_ring->sbq_clean_idx;
1195 u32 start_idx = clean_idx;
1196 struct bq_desc *sbq_desc;
1200 while (rx_ring->sbq_free_cnt > 16) {
1201 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1202 sbq_desc = &rx_ring->sbq[clean_idx];
1203 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1204 "sbq: try cleaning clean_idx = %d.\n",
1206 if (sbq_desc->p.skb == NULL) {
1207 netif_printk(qdev, rx_status, KERN_DEBUG,
1209 "sbq: getting new skb for index %d.\n",
1212 netdev_alloc_skb(qdev->ndev,
1214 if (sbq_desc->p.skb == NULL) {
1215 rx_ring->sbq_clean_idx = clean_idx;
1218 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1219 map = pci_map_single(qdev->pdev,
1220 sbq_desc->p.skb->data,
1221 rx_ring->sbq_buf_size,
1222 PCI_DMA_FROMDEVICE);
1223 if (pci_dma_mapping_error(qdev->pdev, map)) {
1224 netif_err(qdev, ifup, qdev->ndev,
1225 "PCI mapping failed.\n");
1226 rx_ring->sbq_clean_idx = clean_idx;
1227 dev_kfree_skb_any(sbq_desc->p.skb);
1228 sbq_desc->p.skb = NULL;
1231 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1232 dma_unmap_len_set(sbq_desc, maplen,
1233 rx_ring->sbq_buf_size);
1234 *sbq_desc->addr = cpu_to_le64(map);
1238 if (clean_idx == rx_ring->sbq_len)
1241 rx_ring->sbq_clean_idx = clean_idx;
1242 rx_ring->sbq_prod_idx += 16;
1243 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1244 rx_ring->sbq_prod_idx = 0;
1245 rx_ring->sbq_free_cnt -= 16;
1248 if (start_idx != clean_idx) {
1249 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1250 "sbq: updating prod idx = %d.\n",
1251 rx_ring->sbq_prod_idx);
1252 ql_write_db_reg(rx_ring->sbq_prod_idx,
1253 rx_ring->sbq_prod_idx_db_reg);
1257 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1258 struct rx_ring *rx_ring)
1260 ql_update_sbq(qdev, rx_ring);
1261 ql_update_lbq(qdev, rx_ring);
1264 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1265 * fails at some stage, or from the interrupt when a tx completes.
1267 static void ql_unmap_send(struct ql_adapter *qdev,
1268 struct tx_ring_desc *tx_ring_desc, int mapped)
1271 for (i = 0; i < mapped; i++) {
1272 if (i == 0 || (i == 7 && mapped > 7)) {
1274 * Unmap the skb->data area, or the
1275 * external sglist (AKA the Outbound
1276 * Address List (OAL)).
1277 * If it's the zeroeth element, then it's
1278 * the skb->data area. If it's the 7th
1279 * element and there are more than 6 frags, then it's the OAL.
1283 netif_printk(qdev, tx_done, KERN_DEBUG,
1285 "unmapping OAL area.\n");
1287 pci_unmap_single(qdev->pdev,
1288 dma_unmap_addr(&tx_ring_desc->map[i],
1290 dma_unmap_len(&tx_ring_desc->map[i],
1294 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1295 "unmapping frag %d.\n", i);
1296 pci_unmap_page(qdev->pdev,
1297 dma_unmap_addr(&tx_ring_desc->map[i],
1299 dma_unmap_len(&tx_ring_desc->map[i],
1300 maplen), PCI_DMA_TODEVICE);
1306 /* Map the buffers for this transmit. This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 static int ql_map_send(struct ql_adapter *qdev,
1310 struct ob_mac_iocb_req *mac_iocb_ptr,
1311 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 int len = skb_headlen(skb);
1315 int frag_idx, err, map_idx = 0;
1316 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317 int frag_cnt = skb_shinfo(skb)->nr_frags;
1320 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1321 "frag_cnt = %d.\n", frag_cnt);
1324 * Map the skb buffer first.
1326 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328 err = pci_dma_mapping_error(qdev->pdev, map);
1330 netif_err(qdev, tx_queued, qdev->ndev,
1331 "PCI mapping failed with error: %d\n", err);
1333 return NETDEV_TX_BUSY;
1336 tbd->len = cpu_to_le32(len);
1337 tbd->addr = cpu_to_le64(map);
1338 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1343 * This loop fills the remainder of the 8 address descriptors
1344 * in the IOCB. If there are more than 7 fragments, then the
1345 * eighth address desc will point to an external list (OAL).
1346 * When this happens, the remainder of the frags will be stored
1349 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1350 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352 if (frag_idx == 6 && frag_cnt > 7) {
1353 /* Let's tack on an sglist.
1354 * Our control block will now
1356 * iocb->seg[0] = skb->data
1357 * iocb->seg[1] = frag[0]
1358 * iocb->seg[2] = frag[1]
1359 * iocb->seg[3] = frag[2]
1360 * iocb->seg[4] = frag[3]
1361 * iocb->seg[5] = frag[4]
1362 * iocb->seg[6] = frag[5]
1363 * iocb->seg[7] = ptr to OAL (external sglist)
1364 * oal->seg[0] = frag[6]
1365 * oal->seg[1] = frag[7]
1366 * oal->seg[2] = frag[8]
1367 * oal->seg[3] = frag[9]
1368 * oal->seg[4] = frag[10]
1371 /* Tack on the OAL in the eighth segment of IOCB. */
1372 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1375 err = pci_dma_mapping_error(qdev->pdev, map);
1377 netif_err(qdev, tx_queued, qdev->ndev,
1378 "PCI mapping outbound address list with error: %d\n",
1383 tbd->addr = cpu_to_le64(map);
1385 * The length is the number of fragments
1386 * that remain to be mapped times the length
1387 * of our sglist (OAL).
1390 cpu_to_le32((sizeof(struct tx_buf_desc) *
1391 (frag_cnt - frag_idx)) | TX_DESC_C);
1392 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1395 sizeof(struct oal));
1396 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1400 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1403 err = dma_mapping_error(&qdev->pdev->dev, map);
1405 netif_err(qdev, tx_queued, qdev->ndev,
1406 "PCI mapping frags failed with error: %d.\n",
1411 tbd->addr = cpu_to_le64(map);
1412 tbd->len = cpu_to_le32(skb_frag_size(frag));
1413 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1414 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1415 skb_frag_size(frag));
1418 /* Save the number of segments we've mapped. */
1419 tx_ring_desc->map_cnt = map_idx;
1420 /* Terminate the last segment. */
1421 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1422 return NETDEV_TX_OK;
1426 * If the first frag mapping failed, then map_idx will be zero.
1427 * This causes the unmap of the skb->data area. Otherwise
1428 * we pass in the number of frags that mapped successfully
1429 * so they can be unmapped.
1431 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1432 return NETDEV_TX_BUSY;
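/* Worked example of the IOCB/OAL layout described above (the fragment count
 * is illustrative): for an skb with 9 page fragments, seg[0] maps skb->data,
 * seg[1]..seg[6] map frags 0-5, and seg[7] maps the per-descriptor OAL.  The
 * OAL descriptor's length is the size of the remaining tx_buf_desc entries
 * OR'd with TX_DESC_C, frags 6-8 then land in oal->seg[0]..oal->seg[2], and
 * the last descriptor written gets TX_DESC_E to terminate the list.
 */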
1435 /* Categorizing receive firmware frame errors */
1436 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1437 struct rx_ring *rx_ring)
1439 struct nic_stats *stats = &qdev->nic_stats;
1441 stats->rx_err_count++;
1442 rx_ring->rx_errors++;
1444 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1445 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1446 stats->rx_code_err++;
1448 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1449 stats->rx_oversize_err++;
1451 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1452 stats->rx_undersize_err++;
1454 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1455 stats->rx_preamble_err++;
1457 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1458 stats->rx_frame_len_err++;
1460 case IB_MAC_IOCB_RSP_ERR_CRC:
1461 stats->rx_crc_err++;
1468 * ql_update_mac_hdr_len - helper routine to update the mac header length
1469 * based on vlan tags if present
1471 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 void *page, size_t *len)
1477 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1479 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1481 /* Look for stacked vlan tags in ethertype field */
1482 if (tags[6] == ETH_P_8021Q &&
1483 tags[8] == ETH_P_8021Q)
1484 *len += 2 * VLAN_HLEN;
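/* Worked example for the length fixup above (assuming hardware VLAN tag
 * stripping is off, so the tag is still present in the frame): a frame
 * flagged with IB_MAC_IOCB_RSP_V carrying a single tag gives a MAC header of
 * ETH_HLEN + VLAN_HLEN = 14 + 4 = 18 bytes, while stacked 802.1Q tags in the
 * ethertype words add 2 * VLAN_HLEN for 22 bytes total.
 */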
1490 /* Process an inbound completion from an rx ring. */
1491 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1492 struct rx_ring *rx_ring,
1493 struct ib_mac_iocb_rsp *ib_mac_rsp,
1497 struct sk_buff *skb;
1498 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1499 struct napi_struct *napi = &rx_ring->napi;
1501 /* Frame error, so drop the packet. */
1502 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1503 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1504 put_page(lbq_desc->p.pg_chunk.page);
1507 napi->dev = qdev->ndev;
1509 skb = napi_get_frags(napi);
1511 netif_err(qdev, drv, qdev->ndev,
1512 "Couldn't get an skb, exiting.\n");
1513 rx_ring->rx_dropped++;
1514 put_page(lbq_desc->p.pg_chunk.page);
1517 prefetch(lbq_desc->p.pg_chunk.va);
1518 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1519 lbq_desc->p.pg_chunk.page,
1520 lbq_desc->p.pg_chunk.offset,
1524 skb->data_len += length;
1525 skb->truesize += length;
1526 skb_shinfo(skb)->nr_frags++;
1528 rx_ring->rx_packets++;
1529 rx_ring->rx_bytes += length;
1530 skb->ip_summed = CHECKSUM_UNNECESSARY;
1531 skb_record_rx_queue(skb, rx_ring->cq_id);
1532 if (vlan_id != 0xffff)
1533 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1534 napi_gro_frags(napi);
1537 /* Process an inbound completion from an rx ring. */
1538 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1539 struct rx_ring *rx_ring,
1540 struct ib_mac_iocb_rsp *ib_mac_rsp,
1544 struct net_device *ndev = qdev->ndev;
1545 struct sk_buff *skb = NULL;
1547 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1548 struct napi_struct *napi = &rx_ring->napi;
1549 size_t hlen = ETH_HLEN;
1551 skb = netdev_alloc_skb(ndev, length);
1553 rx_ring->rx_dropped++;
1554 put_page(lbq_desc->p.pg_chunk.page);
1558 addr = lbq_desc->p.pg_chunk.va;
1561 /* Frame error, so drop the packet. */
1562 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1563 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1567 /* Update the MAC header length*/
1568 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570 /* The max framesize filter on this chip is set higher than
1571 * MTU since FCoE uses 2k frames.
1573 if (skb->len > ndev->mtu + hlen) {
1574 netif_err(qdev, drv, qdev->ndev,
1575 "Segment too small, dropping.\n");
1576 rx_ring->rx_dropped++;
1579 skb_put_data(skb, addr, hlen);
1580 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1581 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1584 lbq_desc->p.pg_chunk.offset + hlen,
1586 skb->len += length - hlen;
1587 skb->data_len += length - hlen;
1588 skb->truesize += length - hlen;
1590 rx_ring->rx_packets++;
1591 rx_ring->rx_bytes += skb->len;
1592 skb->protocol = eth_type_trans(skb, ndev);
1593 skb_checksum_none_assert(skb);
1595 if ((ndev->features & NETIF_F_RXCSUM) &&
1596 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1599 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1600 "TCP checksum done!\n");
1601 skb->ip_summed = CHECKSUM_UNNECESSARY;
1602 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1603 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1604 /* Unfragmented ipv4 UDP frame. */
1606 (struct iphdr *)((u8 *)addr + hlen);
1607 if (!(iph->frag_off &
1608 htons(IP_MF|IP_OFFSET))) {
1609 skb->ip_summed = CHECKSUM_UNNECESSARY;
1610 netif_printk(qdev, rx_status, KERN_DEBUG,
1612 "UDP checksum done!\n");
1617 skb_record_rx_queue(skb, rx_ring->cq_id);
1618 if (vlan_id != 0xffff)
1619 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1620 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1621 napi_gro_receive(napi, skb);
1623 netif_receive_skb(skb);
1626 dev_kfree_skb_any(skb);
1627 put_page(lbq_desc->p.pg_chunk.page);
1630 /* Process an inbound completion from an rx ring. */
1631 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1632 struct rx_ring *rx_ring,
1633 struct ib_mac_iocb_rsp *ib_mac_rsp,
1637 struct net_device *ndev = qdev->ndev;
1638 struct sk_buff *skb = NULL;
1639 struct sk_buff *new_skb = NULL;
1640 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642 skb = sbq_desc->p.skb;
1643 /* Allocate new_skb and copy */
1644 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1645 if (new_skb == NULL) {
1646 rx_ring->rx_dropped++;
1649 skb_reserve(new_skb, NET_IP_ALIGN);
1651 pci_dma_sync_single_for_cpu(qdev->pdev,
1652 dma_unmap_addr(sbq_desc, mapaddr),
1653 dma_unmap_len(sbq_desc, maplen),
1654 PCI_DMA_FROMDEVICE);
1656 skb_put_data(new_skb, skb->data, length);
1658 pci_dma_sync_single_for_device(qdev->pdev,
1659 dma_unmap_addr(sbq_desc, mapaddr),
1660 dma_unmap_len(sbq_desc, maplen),
1661 PCI_DMA_FROMDEVICE);
1664 /* Frame error, so drop the packet. */
1665 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1666 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1667 dev_kfree_skb_any(skb);
1671 /* loopback self test for ethtool */
1672 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1673 ql_check_lb_frame(qdev, skb);
1674 dev_kfree_skb_any(skb);
1678 /* The max framesize filter on this chip is set higher than
1679 * MTU since FCoE uses 2k frames.
1681 if (skb->len > ndev->mtu + ETH_HLEN) {
1682 dev_kfree_skb_any(skb);
1683 rx_ring->rx_dropped++;
1687 prefetch(skb->data);
1688 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1691 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1692 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1693 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1694 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1695 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1696 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1698 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1699 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1700 "Promiscuous Packet.\n");
1702 rx_ring->rx_packets++;
1703 rx_ring->rx_bytes += skb->len;
1704 skb->protocol = eth_type_trans(skb, ndev);
1705 skb_checksum_none_assert(skb);
1707 /* If rx checksum is on, and there are no
1708 * csum or frame errors.
1710 if ((ndev->features & NETIF_F_RXCSUM) &&
1711 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1713 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1714 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715 "TCP checksum done!\n");
1716 skb->ip_summed = CHECKSUM_UNNECESSARY;
1717 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1718 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1719 /* Unfragmented ipv4 UDP frame. */
1720 struct iphdr *iph = (struct iphdr *) skb->data;
1721 if (!(iph->frag_off &
1722 htons(IP_MF|IP_OFFSET))) {
1723 skb->ip_summed = CHECKSUM_UNNECESSARY;
1724 netif_printk(qdev, rx_status, KERN_DEBUG,
1726 "UDP checksum done!\n");
1731 skb_record_rx_queue(skb, rx_ring->cq_id);
1732 if (vlan_id != 0xffff)
1733 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1734 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1735 napi_gro_receive(&rx_ring->napi, skb);
1737 netif_receive_skb(skb);
1740 static void ql_realign_skb(struct sk_buff *skb, int len)
1742 void *temp_addr = skb->data;
1744 /* Undo the skb_reserve(skb,32) we did before
1745 * giving to hardware, and realign data on
1746 * a 2-byte boundary.
1748 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1749 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1750 memmove(skb->data, temp_addr, len);
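/* Worked example for the realignment above: small receive buffers are
 * reserved QLGE_SB_PAD (32) bytes before being handed to hardware (see
 * ql_update_sbq()), so pulling data and tail back by
 * QLGE_SB_PAD - NET_IP_ALIGN (typically 32 - 2 = 30) bytes leaves the payload
 * at a 2-byte offset, which puts the IP header that follows the 14-byte
 * Ethernet header on a 4-byte boundary.
 */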
1754 * This function builds an skb for the given inbound
1755 * completion. It will be rewritten for readability in the near
1756 * future, but for now it works well.
1758 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1759 struct rx_ring *rx_ring,
1760 struct ib_mac_iocb_rsp *ib_mac_rsp)
1762 struct bq_desc *lbq_desc;
1763 struct bq_desc *sbq_desc;
1764 struct sk_buff *skb = NULL;
1765 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1766 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1767 size_t hlen = ETH_HLEN;
1770 * Handle the header buffer if present.
1772 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1773 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 "Header of %d bytes in small buffer.\n", hdr_len);
1777 * Headers fit nicely into a small buffer.
1779 sbq_desc = ql_get_curr_sbuf(rx_ring);
1780 pci_unmap_single(qdev->pdev,
1781 dma_unmap_addr(sbq_desc, mapaddr),
1782 dma_unmap_len(sbq_desc, maplen),
1783 PCI_DMA_FROMDEVICE);
1784 skb = sbq_desc->p.skb;
1785 ql_realign_skb(skb, hdr_len);
1786 skb_put(skb, hdr_len);
1787 sbq_desc->p.skb = NULL;
1791 * Handle the data buffer(s).
1793 if (unlikely(!length)) { /* Is there data too? */
1794 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1795 "No Data buffer in this packet.\n");
1799 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1800 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1801 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802 "Headers in small, data of %d bytes in small, combine them.\n",
1805 * Data is less than small buffer size so it's
1806 * stuffed in a small buffer.
1807 * For this case we append the data
1808 * from the "data" small buffer to the "header" small
1811 sbq_desc = ql_get_curr_sbuf(rx_ring);
1812 pci_dma_sync_single_for_cpu(qdev->pdev,
1814 (sbq_desc, mapaddr),
1817 PCI_DMA_FROMDEVICE);
1818 skb_put_data(skb, sbq_desc->p.skb->data, length);
1819 pci_dma_sync_single_for_device(qdev->pdev,
1826 PCI_DMA_FROMDEVICE);
1828 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829 "%d bytes in a single small buffer.\n",
1831 sbq_desc = ql_get_curr_sbuf(rx_ring);
1832 skb = sbq_desc->p.skb;
1833 ql_realign_skb(skb, length);
1834 skb_put(skb, length);
1835 pci_unmap_single(qdev->pdev,
1836 dma_unmap_addr(sbq_desc,
1838 dma_unmap_len(sbq_desc,
1840 PCI_DMA_FROMDEVICE);
1841 sbq_desc->p.skb = NULL;
1843 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1844 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1845 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846 "Header in small, %d bytes in large. Chain large to small!\n",
1849 * The data is in a single large buffer. We
1850 * chain it to the header buffer's skb and let
1853 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1855 "Chaining page at offset = %d, for %d bytes to skb.\n",
1856 lbq_desc->p.pg_chunk.offset, length);
1857 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1858 lbq_desc->p.pg_chunk.offset,
1861 skb->data_len += length;
1862 skb->truesize += length;
1865 * The headers and data are in a single large buffer. We
1866 * copy it to a new skb and let it go. This can happen with
1867 * jumbo mtu on a non-TCP/UDP frame.
1869 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1870 skb = netdev_alloc_skb(qdev->ndev, length);
1872 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1873 "No skb available, drop the packet.\n");
1876 pci_unmap_page(qdev->pdev,
1877 dma_unmap_addr(lbq_desc,
1879 dma_unmap_len(lbq_desc, maplen),
1880 PCI_DMA_FROMDEVICE);
1881 skb_reserve(skb, NET_IP_ALIGN);
1882 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1883 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1885 skb_fill_page_desc(skb, 0,
1886 lbq_desc->p.pg_chunk.page,
1887 lbq_desc->p.pg_chunk.offset,
1890 skb->data_len += length;
1891 skb->truesize += length;
1892 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1893 lbq_desc->p.pg_chunk.va,
1895 __pskb_pull_tail(skb, hlen);
1899 * The data is in a chain of large buffers
1900 * pointed to by a small buffer. We loop
1901 * thru and chain them to our small header
1903 * frags: There are 18 max frags and our small
1904 * buffer will hold 32 of them. The thing is,
1905 * we'll use 3 max for our 9000 byte jumbo
1906 * frames. If the MTU goes up we could
1907 * eventually be in trouble.
1910 sbq_desc = ql_get_curr_sbuf(rx_ring);
1911 pci_unmap_single(qdev->pdev,
1912 dma_unmap_addr(sbq_desc, mapaddr),
1913 dma_unmap_len(sbq_desc, maplen),
1914 PCI_DMA_FROMDEVICE);
1915 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1917 * This is a non-TCP/UDP IP frame, so
1918 * the headers aren't split into a small
1919 * buffer. We have to use the small buffer
1920 * that contains our sg list as our skb to
1921 * send upstairs. Copy the sg list here to
1922 * a local buffer and use it to find the
1925 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1926 "%d bytes of headers & data in chain of large.\n",
1928 skb = sbq_desc->p.skb;
1929 sbq_desc->p.skb = NULL;
1930 skb_reserve(skb, NET_IP_ALIGN);
1933 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1934 size = (length < rx_ring->lbq_buf_size) ? length :
1935 rx_ring->lbq_buf_size;
1937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 "Adding page %d to skb for %d bytes.\n",
1940 skb_fill_page_desc(skb, i,
1941 lbq_desc->p.pg_chunk.page,
1942 lbq_desc->p.pg_chunk.offset,
1945 skb->data_len += size;
1946 skb->truesize += size;
1949 } while (length > 0);
1950 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1952 __pskb_pull_tail(skb, hlen);
1957 /* Process an inbound completion from an rx ring. */
1958 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1959 struct rx_ring *rx_ring,
1960 struct ib_mac_iocb_rsp *ib_mac_rsp,
1963 struct net_device *ndev = qdev->ndev;
1964 struct sk_buff *skb = NULL;
1966 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1968 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1969 if (unlikely(!skb)) {
1970 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1971 "No skb available, drop packet.\n");
1972 rx_ring->rx_dropped++;
1976 /* Frame error, so drop the packet. */
1977 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1978 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1979 dev_kfree_skb_any(skb);
1983 /* The max framesize filter on this chip is set higher than
1984 * MTU since FCoE uses 2k frames.
1986 if (skb->len > ndev->mtu + ETH_HLEN) {
1987 dev_kfree_skb_any(skb);
1988 rx_ring->rx_dropped++;
1992 /* loopback self test for ethtool */
1993 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1994 ql_check_lb_frame(qdev, skb);
1995 dev_kfree_skb_any(skb);
1999 prefetch(skb->data);
2000 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2001 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2002 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2003 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2004 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2005 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2006 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2007 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2008 rx_ring->rx_multicast++;
2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012 "Promiscuous Packet.\n");
2015 skb->protocol = eth_type_trans(skb, ndev);
2016 skb_checksum_none_assert(skb);
2018 /* If rx checksum is on, and there are no
2019 * csum or frame errors.
2021 if ((ndev->features & NETIF_F_RXCSUM) &&
2022 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2024 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2025 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2026 "TCP checksum done!\n");
2027 skb->ip_summed = CHECKSUM_UNNECESSARY;
2028 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2029 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2030 /* Unfragmented ipv4 UDP frame. */
2031 struct iphdr *iph = (struct iphdr *) skb->data;
2032 if (!(iph->frag_off &
2033 htons(IP_MF|IP_OFFSET))) {
2034 skb->ip_summed = CHECKSUM_UNNECESSARY;
2035 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2036 "TCP checksum done!\n");
2041 rx_ring->rx_packets++;
2042 rx_ring->rx_bytes += skb->len;
2043 skb_record_rx_queue(skb, rx_ring->cq_id);
2044 if (vlan_id != 0xffff)
2045 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2046 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2047 napi_gro_receive(&rx_ring->napi, skb);
2049 netif_receive_skb(skb);
2052 /* Process an inbound completion from an rx ring. */
2053 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2054 struct rx_ring *rx_ring,
2055 struct ib_mac_iocb_rsp *ib_mac_rsp)
2057 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2058 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2059 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2060 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2061 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2063 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2065 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2066 /* The data and headers are split into
2069 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2071 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2072 /* The data fit in a single small buffer.
2073 * Allocate a new skb, copy the data and
2074 * return the buffer to the free pool.
2076 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2078 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2079 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2080 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2081 /* TCP packet in a page chunk that's been checksummed.
2082 * Tack it on to our GRO skb and let it go.
2084 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2086 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087 /* Non-TCP packet in a page chunk. Allocate an
2088 * skb, tack it on frags, and send it up.
2090 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2093 /* Non-TCP/UDP large frames that span multiple buffers
2094 * can be processed correctly by the split frame logic.
2096 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2100 return (unsigned long)length;
2103 /* Process an outbound completion from an rx ring. */
2104 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2105 struct ob_mac_iocb_rsp *mac_rsp)
2107 struct tx_ring *tx_ring;
2108 struct tx_ring_desc *tx_ring_desc;
2110 QL_DUMP_OB_MAC_RSP(mac_rsp);
2111 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2112 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2113 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2114 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2115 tx_ring->tx_packets++;
2116 dev_kfree_skb(tx_ring_desc->skb);
2117 tx_ring_desc->skb = NULL;
2119 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2122 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2123 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2124 netif_warn(qdev, tx_done, qdev->ndev,
2125 "Total descriptor length did not match transfer length.\n");
2127 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2128 netif_warn(qdev, tx_done, qdev->ndev,
2129 "Frame too short to be valid, not sent.\n");
2131 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2132 netif_warn(qdev, tx_done, qdev->ndev,
2133 "Frame too long, but sent anyway.\n");
2135 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2136 netif_warn(qdev, tx_done, qdev->ndev,
2137 "PCI backplane error. Frame not sent.\n");
2140 atomic_inc(&tx_ring->tx_count);
2143 /* Fire up a handler to reset the MPI processor. */
2144 void ql_queue_fw_error(struct ql_adapter *qdev)
2147 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2150 void ql_queue_asic_error(struct ql_adapter *qdev)
2153 ql_disable_interrupts(qdev);
2154 /* Clear adapter up bit to signal the recovery
2155 * process that it shouldn't kill the reset worker
2158 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2159 /* Set asic recovery bit to indicate reset process that we are
2160 * in fatal error recovery process rather than normal close
2162 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2163 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2166 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2167 struct ib_ae_iocb_rsp *ib_ae_rsp)
2169 switch (ib_ae_rsp->event) {
2170 case MGMT_ERR_EVENT:
2171 netif_err(qdev, rx_err, qdev->ndev,
2172 "Management Processor Fatal Error.\n");
2173 ql_queue_fw_error(qdev);
2176 case CAM_LOOKUP_ERR_EVENT:
2177 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2178 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2179 ql_queue_asic_error(qdev);
2182 case SOFT_ECC_ERROR_EVENT:
2183 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2184 ql_queue_asic_error(qdev);
2187 case PCI_ERR_ANON_BUF_RD:
2188 netdev_err(qdev->ndev, "PCI error occurred when reading "
2189 "anonymous buffers from rx_ring %d.\n",
2191 ql_queue_asic_error(qdev);
2195 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2197 ql_queue_asic_error(qdev);
2202 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2204 struct ql_adapter *qdev = rx_ring->qdev;
2205 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2206 struct ob_mac_iocb_rsp *net_rsp = NULL;
2209 struct tx_ring *tx_ring;
2210 /* While there are entries in the completion queue. */
2211 while (prod != rx_ring->cnsmr_idx) {
2213 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2214 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2215 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2217 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2219 switch (net_rsp->opcode) {
2221 case OPCODE_OB_MAC_TSO_IOCB:
2222 case OPCODE_OB_MAC_IOCB:
2223 ql_process_mac_tx_intr(qdev, net_rsp);
2226 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2227 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2231 ql_update_cq(rx_ring);
2232 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2236 ql_write_cq_idx(rx_ring);
2237 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2238 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2239 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2241 * The queue got stopped because the tx_ring was full.
2242 * Wake it up, because it's now at least 25% empty.
2244 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2250 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2252 struct ql_adapter *qdev = rx_ring->qdev;
2253 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2254 struct ql_net_rsp_iocb *net_rsp;
2257 /* While there are entries in the completion queue. */
2258 while (prod != rx_ring->cnsmr_idx) {
2260 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2261 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2262 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2264 net_rsp = rx_ring->curr_entry;
2266 switch (net_rsp->opcode) {
2267 case OPCODE_IB_MAC_IOCB:
2268 ql_process_mac_rx_intr(qdev, rx_ring,
2269 (struct ib_mac_iocb_rsp *)
2273 case OPCODE_IB_AE_IOCB:
2274 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2278 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2284 ql_update_cq(rx_ring);
2285 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2286 if (count == budget)
2289 ql_update_buffer_queues(qdev, rx_ring);
2290 ql_write_cq_idx(rx_ring);
2294 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2296 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2297 struct ql_adapter *qdev = rx_ring->qdev;
2298 struct rx_ring *trx_ring;
2299 int i, work_done = 0;
2300 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2302 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2303 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2305 /* Service the TX rings first. They start
2306 * right after the RSS rings. */
2307 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2308 trx_ring = &qdev->rx_ring[i];
2309 /* If this TX completion ring belongs to this vector and
2310 * it's not empty then service it.
2312 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2313 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2314 trx_ring->cnsmr_idx)) {
2315 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2316 "%s: Servicing TX completion ring %d.\n",
2317 __func__, trx_ring->cq_id);
2318 ql_clean_outbound_rx_ring(trx_ring);
2323 * Now service the RSS ring if it's active.
2325 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2326 rx_ring->cnsmr_idx) {
2327 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2328 "%s: Servicing RX completion ring %d.\n",
2329 __func__, rx_ring->cq_id);
2330 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2333 if (work_done < budget) {
2334 napi_complete_done(napi, work_done);
2335 ql_enable_completion_interrupt(qdev, rx_ring->irq);
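	/* Budget was not exhausted, so polling is done: complete NAPI and
	 * re-arm this vector's completion interrupt.  If the budget had been
	 * used up, the kernel would poll us again with the interrupt still
	 * masked.
	 */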
2340 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2342 struct ql_adapter *qdev = netdev_priv(ndev);
2344 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2345 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2346 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2353 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2354 * based on the features to enable/disable hardware vlan accel
2356 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2357 netdev_features_t features)
2359 struct ql_adapter *qdev = netdev_priv(ndev);
2361 bool need_restart = netif_running(ndev);
2364 status = ql_adapter_down(qdev);
2366 netif_err(qdev, link, qdev->ndev,
2367 "Failed to bring down the adapter\n");
2372 /* Update the features with the requested changes. */
2373 ndev->features = features;
2376 status = ql_adapter_up(qdev);
2378 netif_err(qdev, link, qdev->ndev,
2379 "Failed to bring up the adapter\n");
2387 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2388 netdev_features_t features)
2392 /* Update the behavior of vlan accel in the adapter */
2393 err = qlge_update_hw_vlan_features(ndev, features);
2400 static int qlge_set_features(struct net_device *ndev,
2401 netdev_features_t features)
2403 netdev_features_t changed = ndev->features ^ features;
2405 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2406 qlge_vlan_mode(ndev, features);
2411 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2413 u32 enable_bit = MAC_ADDR_E;
2416 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2417 MAC_ADDR_TYPE_VLAN, vid);
2419 netif_err(qdev, ifup, qdev->ndev,
2420 "Failed to init vlan address.\n");
2424 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2426 struct ql_adapter *qdev = netdev_priv(ndev);
2430 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2434 err = __qlge_vlan_rx_add_vid(qdev, vid);
2435 set_bit(vid, qdev->active_vlans);
2437 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2442 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2447 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2448 MAC_ADDR_TYPE_VLAN, vid);
2450 netif_err(qdev, ifup, qdev->ndev,
2451 "Failed to clear vlan address.\n");
2455 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2457 struct ql_adapter *qdev = netdev_priv(ndev);
2461 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2465 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2466 clear_bit(vid, qdev->active_vlans);
2468 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2473 static void qlge_restore_vlan(struct ql_adapter *qdev)
2478 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2482 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2483 __qlge_vlan_rx_add_vid(qdev, vid);
2485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2488 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2489 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2491 struct rx_ring *rx_ring = dev_id;
2492 napi_schedule(&rx_ring->napi);
2496 /* This handles a fatal error, MPI activity, and the default
2497 * rx_ring in an MSI-X multiple vector environment.
2498 * In an MSI/Legacy environment it also processes the rest of
2501 static irqreturn_t qlge_isr(int irq, void *dev_id)
2503 struct rx_ring *rx_ring = dev_id;
2504 struct ql_adapter *qdev = rx_ring->qdev;
2505 struct intr_context *intr_context = &qdev->intr_context[0];
2509 spin_lock(&qdev->hw_lock);
2510 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2511 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2512 "Shared Interrupt, Not ours!\n");
2513 spin_unlock(&qdev->hw_lock);
2516 spin_unlock(&qdev->hw_lock);
2518 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2521 * Check for fatal error.
2524 ql_queue_asic_error(qdev);
2525 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2526 var = ql_read32(qdev, ERR_STS);
2527 netdev_err(qdev->ndev, "Resetting chip. "
2528 "Error Status Register = 0x%x\n", var);
2533 * Check MPI processor activity.
2535 if ((var & STS_PI) &&
2536 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2538 * We've got an async event or mailbox completion.
2539 * Handle it and clear the source of the interrupt.
2541 netif_err(qdev, intr, qdev->ndev,
2542 "Got MPI processor interrupt.\n");
2543 ql_disable_completion_interrupt(qdev, intr_context->intr);
2544 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2545 queue_delayed_work_on(smp_processor_id(),
2546 qdev->workqueue, &qdev->mpi_work, 0);
2551 * Get the bit-mask that shows the active queues for this
2552 * pass. Compare it to the queues that this irq services
2553 * and call napi if there's a match.
2555 var = ql_read32(qdev, ISR1);
2556 if (var & intr_context->irq_mask) {
2557 netif_info(qdev, intr, qdev->ndev,
2558 "Waking handler for rx_ring[0].\n");
2559 ql_disable_completion_interrupt(qdev, intr_context->intr);
2560 napi_schedule(&rx_ring->napi);
2563 ql_enable_completion_interrupt(qdev, intr_context->intr);
2564 return work_done ? IRQ_HANDLED : IRQ_NONE;
2567 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2570 if (skb_is_gso(skb)) {
2572 __be16 l3_proto = vlan_get_protocol(skb);
2574 err = skb_cow_head(skb, 0);
2578 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2579 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2580 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2581 mac_iocb_ptr->total_hdrs_len =
2582 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2583 mac_iocb_ptr->net_trans_offset =
2584 cpu_to_le16(skb_network_offset(skb) |
2585 skb_transport_offset(skb)
2586 << OB_MAC_TRANSPORT_HDR_SHIFT);
2587 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2588 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
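		/* For LSO the hardware produces the TCP checksum of every
		 * segment it emits; seed tcp_hdr()->check with the
		 * pseudo-header sum so it only has to add the payload.
		 */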
2589 if (likely(l3_proto == htons(ETH_P_IP))) {
2590 struct iphdr *iph = ip_hdr(skb);
2592 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2593 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2597 } else if (l3_proto == htons(ETH_P_IPV6)) {
2598 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2599 tcp_hdr(skb)->check =
2600 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2601 &ipv6_hdr(skb)->daddr,
2609 static void ql_hw_csum_setup(struct sk_buff *skb,
2610 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2613 struct iphdr *iph = ip_hdr(skb);
2615 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2616 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2617 mac_iocb_ptr->net_trans_offset =
2618 cpu_to_le16(skb_network_offset(skb) |
2619 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2621 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2622 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2623 if (likely(iph->protocol == IPPROTO_TCP)) {
2624 check = &(tcp_hdr(skb)->check);
2625 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2626 mac_iocb_ptr->total_hdrs_len =
2627 cpu_to_le16(skb_transport_offset(skb) +
2628 (tcp_hdr(skb)->doff << 2));
2630 check = &(udp_hdr(skb)->check);
2631 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2632 mac_iocb_ptr->total_hdrs_len =
2633 cpu_to_le16(skb_transport_offset(skb) +
2634 sizeof(struct udphdr));
2636 *check = ~csum_tcpudp_magic(iph->saddr,
2637 iph->daddr, len, iph->protocol, 0);
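	/* Seed the checksum field with the pseudo-header sum; the hardware
	 * (per the OB_MAC_TSO_IOCB_TC/UC flag set above) folds in the payload
	 * and writes the final TCP or UDP checksum.
	 */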
2640 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2642 struct tx_ring_desc *tx_ring_desc;
2643 struct ob_mac_iocb_req *mac_iocb_ptr;
2644 struct ql_adapter *qdev = netdev_priv(ndev);
2646 struct tx_ring *tx_ring;
2647 u32 tx_ring_idx = (u32) skb->queue_mapping;
2649 tx_ring = &qdev->tx_ring[tx_ring_idx];
2651 if (skb_padto(skb, ETH_ZLEN))
2652 return NETDEV_TX_OK;
2654 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2655 netif_info(qdev, tx_queued, qdev->ndev,
2656 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2657 __func__, tx_ring_idx);
2658 netif_stop_subqueue(ndev, tx_ring->wq_id);
2659 tx_ring->tx_errors++;
2660 return NETDEV_TX_BUSY;
2662 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2663 mac_iocb_ptr = tx_ring_desc->queue_entry;
2664 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2666 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2667 mac_iocb_ptr->tid = tx_ring_desc->index;
2668 /* We use the upper 32-bits to store the tx queue for this IO.
2669 * When we get the completion we can use it to establish the context.
2671 mac_iocb_ptr->txq_idx = tx_ring_idx;
2672 tx_ring_desc->skb = skb;
2674 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2676 if (skb_vlan_tag_present(skb)) {
2677 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2678 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2679 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2680 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2682 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2684 dev_kfree_skb_any(skb);
2685 return NETDEV_TX_OK;
2686 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2687 ql_hw_csum_setup(skb,
2688 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2690 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2692 netif_err(qdev, tx_queued, qdev->ndev,
2693 "Could not map the segments.\n");
2694 tx_ring->tx_errors++;
2695 return NETDEV_TX_BUSY;
2697 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2698 tx_ring->prod_idx++;
2699 if (tx_ring->prod_idx == tx_ring->wq_len)
2700 tx_ring->prod_idx = 0;
2703 ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
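	/* Ring the producer-index doorbell so the chip starts fetching the
	 * newly queued IOCB.
	 */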
2705 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2706 "tx queued, slot %d, len %d\n",
2707 tx_ring->prod_idx, skb->len);
2709 atomic_dec(&tx_ring->tx_count);
2711 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2712 netif_stop_subqueue(ndev, tx_ring->wq_id);
2713 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2715 * The queue got stopped because the tx_ring was full.
2716 * Wake it up, because it's now at least 25% empty.
2718 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2720 return NETDEV_TX_OK;
2724 static void ql_free_shadow_space(struct ql_adapter *qdev)
2726 if (qdev->rx_ring_shadow_reg_area) {
2727 pci_free_consistent(qdev->pdev,
2729 qdev->rx_ring_shadow_reg_area,
2730 qdev->rx_ring_shadow_reg_dma);
2731 qdev->rx_ring_shadow_reg_area = NULL;
2733 if (qdev->tx_ring_shadow_reg_area) {
2734 pci_free_consistent(qdev->pdev,
2736 qdev->tx_ring_shadow_reg_area,
2737 qdev->tx_ring_shadow_reg_dma);
2738 qdev->tx_ring_shadow_reg_area = NULL;
2742 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2744 qdev->rx_ring_shadow_reg_area =
2745 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2746 &qdev->rx_ring_shadow_reg_dma);
2747 if (qdev->rx_ring_shadow_reg_area == NULL) {
2748 netif_err(qdev, ifup, qdev->ndev,
2749 "Allocation of RX shadow space failed.\n");
2753 qdev->tx_ring_shadow_reg_area =
2754 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2755 &qdev->tx_ring_shadow_reg_dma);
2756 if (qdev->tx_ring_shadow_reg_area == NULL) {
2757 netif_err(qdev, ifup, qdev->ndev,
2758 "Allocation of TX shadow space failed.\n");
2759 goto err_wqp_sh_area;
2764 pci_free_consistent(qdev->pdev,
2766 qdev->rx_ring_shadow_reg_area,
2767 qdev->rx_ring_shadow_reg_dma);
2771 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2773 struct tx_ring_desc *tx_ring_desc;
2775 struct ob_mac_iocb_req *mac_iocb_ptr;
2777 mac_iocb_ptr = tx_ring->wq_base;
2778 tx_ring_desc = tx_ring->q;
2779 for (i = 0; i < tx_ring->wq_len; i++) {
2780 tx_ring_desc->index = i;
2781 tx_ring_desc->skb = NULL;
2782 tx_ring_desc->queue_entry = mac_iocb_ptr;
2786 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
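	/* tx_count tracks free slots: it starts at the full ring length, is
	 * decremented in qlge_send() and incremented again when the
	 * completion is handled in ql_process_mac_tx_intr().
	 */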
2789 static void ql_free_tx_resources(struct ql_adapter *qdev,
2790 struct tx_ring *tx_ring)
2792 if (tx_ring->wq_base) {
2793 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2794 tx_ring->wq_base, tx_ring->wq_base_dma);
2795 tx_ring->wq_base = NULL;
2801 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2802 struct tx_ring *tx_ring)
2805 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2806 &tx_ring->wq_base_dma);
2808 if ((tx_ring->wq_base == NULL) ||
2809 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2813 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2814 if (tx_ring->q == NULL)
2819 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2820 tx_ring->wq_base, tx_ring->wq_base_dma);
2821 tx_ring->wq_base = NULL;
2823 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2827 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2829 struct bq_desc *lbq_desc;
2831 uint32_t curr_idx, clean_idx;
2833 curr_idx = rx_ring->lbq_curr_idx;
2834 clean_idx = rx_ring->lbq_clean_idx;
2835 while (curr_idx != clean_idx) {
2836 lbq_desc = &rx_ring->lbq[curr_idx];
2838 if (lbq_desc->p.pg_chunk.last_flag) {
2839 pci_unmap_page(qdev->pdev,
2840 lbq_desc->p.pg_chunk.map,
2841 ql_lbq_block_size(qdev),
2842 PCI_DMA_FROMDEVICE);
2843 lbq_desc->p.pg_chunk.last_flag = 0;
2846 put_page(lbq_desc->p.pg_chunk.page);
2847 lbq_desc->p.pg_chunk.page = NULL;
2849 if (++curr_idx == rx_ring->lbq_len)
2853 if (rx_ring->pg_chunk.page) {
2854 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2855 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2856 put_page(rx_ring->pg_chunk.page);
2857 rx_ring->pg_chunk.page = NULL;
2861 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2864 struct bq_desc *sbq_desc;
2866 for (i = 0; i < rx_ring->sbq_len; i++) {
2867 sbq_desc = &rx_ring->sbq[i];
2868 if (sbq_desc == NULL) {
2869 netif_err(qdev, ifup, qdev->ndev,
2870 "sbq_desc %d is NULL.\n", i);
2873 if (sbq_desc->p.skb) {
2874 pci_unmap_single(qdev->pdev,
2875 dma_unmap_addr(sbq_desc, mapaddr),
2876 dma_unmap_len(sbq_desc, maplen),
2877 PCI_DMA_FROMDEVICE);
2878 dev_kfree_skb(sbq_desc->p.skb);
2879 sbq_desc->p.skb = NULL;
2884 /* Free all large and small rx buffers associated
2885 * with the completion queues for this device.
2887 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2890 struct rx_ring *rx_ring;
2892 for (i = 0; i < qdev->rx_ring_count; i++) {
2893 rx_ring = &qdev->rx_ring[i];
2895 ql_free_lbq_buffers(qdev, rx_ring);
2897 ql_free_sbq_buffers(qdev, rx_ring);
2901 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2903 struct rx_ring *rx_ring;
2906 for (i = 0; i < qdev->rx_ring_count; i++) {
2907 rx_ring = &qdev->rx_ring[i];
2908 if (rx_ring->type != TX_Q)
2909 ql_update_buffer_queues(qdev, rx_ring);
2913 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2914 struct rx_ring *rx_ring)
2917 struct bq_desc *lbq_desc;
2918 __le64 *bq = rx_ring->lbq_base;
2920 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2921 for (i = 0; i < rx_ring->lbq_len; i++) {
2922 lbq_desc = &rx_ring->lbq[i];
2923 memset(lbq_desc, 0, sizeof(*lbq_desc));
2924 lbq_desc->index = i;
2925 lbq_desc->addr = bq;
2930 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2931 struct rx_ring *rx_ring)
2934 struct bq_desc *sbq_desc;
2935 __le64 *bq = rx_ring->sbq_base;
2937 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2938 for (i = 0; i < rx_ring->sbq_len; i++) {
2939 sbq_desc = &rx_ring->sbq[i];
2940 memset(sbq_desc, 0, sizeof(*sbq_desc));
2941 sbq_desc->index = i;
2942 sbq_desc->addr = bq;
2947 static void ql_free_rx_resources(struct ql_adapter *qdev,
2948 struct rx_ring *rx_ring)
2950 /* Free the small buffer queue. */
2951 if (rx_ring->sbq_base) {
2952 pci_free_consistent(qdev->pdev,
2954 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2955 rx_ring->sbq_base = NULL;
2958 /* Free the small buffer queue control blocks. */
2959 kfree(rx_ring->sbq);
2960 rx_ring->sbq = NULL;
2962 /* Free the large buffer queue. */
2963 if (rx_ring->lbq_base) {
2964 pci_free_consistent(qdev->pdev,
2966 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2967 rx_ring->lbq_base = NULL;
2970 /* Free the large buffer queue control blocks. */
2971 kfree(rx_ring->lbq);
2972 rx_ring->lbq = NULL;
2974 /* Free the rx queue. */
2975 if (rx_ring->cq_base) {
2976 pci_free_consistent(qdev->pdev,
2978 rx_ring->cq_base, rx_ring->cq_base_dma);
2979 rx_ring->cq_base = NULL;
2983 /* Allocate queues and buffers for this completion queue based
2984 * on the values in the parameter structure. */
2985 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2986 struct rx_ring *rx_ring)
2990 * Allocate the completion queue for this rx_ring.
2993 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2994 &rx_ring->cq_base_dma);
2996 if (rx_ring->cq_base == NULL) {
2997 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
3001 if (rx_ring->sbq_len) {
3003 * Allocate small buffer queue.
3006 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3007 &rx_ring->sbq_base_dma);
3009 if (rx_ring->sbq_base == NULL) {
3010 netif_err(qdev, ifup, qdev->ndev,
3011 "Small buffer queue allocation failed.\n");
3016 * Allocate small buffer queue control blocks.
3018 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3019 sizeof(struct bq_desc),
3021 if (rx_ring->sbq == NULL)
3024 ql_init_sbq_ring(qdev, rx_ring);
3027 if (rx_ring->lbq_len) {
3029 * Allocate large buffer queue.
3032 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3033 &rx_ring->lbq_base_dma);
3035 if (rx_ring->lbq_base == NULL) {
3036 netif_err(qdev, ifup, qdev->ndev,
3037 "Large buffer queue allocation failed.\n");
3041 * Allocate large buffer queue control blocks.
3043 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3044 sizeof(struct bq_desc),
3046 if (rx_ring->lbq == NULL)
3049 ql_init_lbq_ring(qdev, rx_ring);
3055 ql_free_rx_resources(qdev, rx_ring);
3059 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3061 struct tx_ring *tx_ring;
3062 struct tx_ring_desc *tx_ring_desc;
3066 * Loop through all queues and free
3069 for (j = 0; j < qdev->tx_ring_count; j++) {
3070 tx_ring = &qdev->tx_ring[j];
3071 for (i = 0; i < tx_ring->wq_len; i++) {
3072 tx_ring_desc = &tx_ring->q[i];
3073 if (tx_ring_desc && tx_ring_desc->skb) {
3074 netif_err(qdev, ifdown, qdev->ndev,
3075 "Freeing lost SKB %p, from queue %d, index %d.\n",
3076 tx_ring_desc->skb, j,
3077 tx_ring_desc->index);
3078 ql_unmap_send(qdev, tx_ring_desc,
3079 tx_ring_desc->map_cnt);
3080 dev_kfree_skb(tx_ring_desc->skb);
3081 tx_ring_desc->skb = NULL;
3087 static void ql_free_mem_resources(struct ql_adapter *qdev)
3091 for (i = 0; i < qdev->tx_ring_count; i++)
3092 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3093 for (i = 0; i < qdev->rx_ring_count; i++)
3094 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3095 ql_free_shadow_space(qdev);
3098 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3102 /* Allocate space for our shadow registers and such. */
3103 if (ql_alloc_shadow_space(qdev))
3106 for (i = 0; i < qdev->rx_ring_count; i++) {
3107 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3108 netif_err(qdev, ifup, qdev->ndev,
3109 "RX resource allocation failed.\n");
3113 /* Allocate tx queue resources */
3114 for (i = 0; i < qdev->tx_ring_count; i++) {
3115 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3116 netif_err(qdev, ifup, qdev->ndev,
3117 "TX resource allocation failed.\n");
3124 ql_free_mem_resources(qdev);
3128 /* Set up the rx ring control block and pass it to the chip.
3129 * The control block is defined as
3130 * "Completion Queue Initialization Control Block", or cqicb.
3132 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3134 struct cqicb *cqicb = &rx_ring->cqicb;
3135 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3136 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3137 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3138 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3139 void __iomem *doorbell_area =
3140 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3144 __le64 *base_indirect_ptr;
3147 /* Set up the shadow registers for this ring. */
3148 rx_ring->prod_idx_sh_reg = shadow_reg;
3149 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3150 *rx_ring->prod_idx_sh_reg = 0;
3151 shadow_reg += sizeof(u64);
3152 shadow_reg_dma += sizeof(u64);
3153 rx_ring->lbq_base_indirect = shadow_reg;
3154 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3155 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3156 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3157 rx_ring->sbq_base_indirect = shadow_reg;
3158 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
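	/* Per-ring shadow area layout: an 8-byte producer index, then the
	 * large- and small-buffer-queue indirect page lists whose DMA
	 * addresses are handed to the chip through the CQICB below.
	 */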
3160 /* PCI doorbell mem area + 0x00 for consumer index register */
3161 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3162 rx_ring->cnsmr_idx = 0;
3163 rx_ring->curr_entry = rx_ring->cq_base;
3165 /* PCI doorbell mem area + 0x04 for valid register */
3166 rx_ring->valid_db_reg = doorbell_area + 0x04;
3168 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3169 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3171 /* PCI doorbell mem area + 0x1c */
3172 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3174 memset((void *)cqicb, 0, sizeof(struct cqicb));
3175 cqicb->msix_vect = rx_ring->irq;
3177 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3178 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3180 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3182 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3185 * Set up the control block load flags.
3187 cqicb->flags = FLAGS_LC | /* Load queue base address */
3188 FLAGS_LV | /* Load MSI-X vector */
3189 FLAGS_LI; /* Load irq delay values */
3190 if (rx_ring->lbq_len) {
3191 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3192 tmp = (u64)rx_ring->lbq_base_dma;
3193 base_indirect_ptr = rx_ring->lbq_base_indirect;
3196 *base_indirect_ptr = cpu_to_le64(tmp);
3197 tmp += DB_PAGE_SIZE;
3198 base_indirect_ptr++;
3200 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3202 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3203 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3204 (u16) rx_ring->lbq_buf_size;
3205 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3206 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3207 (u16) rx_ring->lbq_len;
3208 cqicb->lbq_len = cpu_to_le16(bq_len);
3209 rx_ring->lbq_prod_idx = 0;
3210 rx_ring->lbq_curr_idx = 0;
3211 rx_ring->lbq_clean_idx = 0;
3212 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3214 if (rx_ring->sbq_len) {
3215 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3216 tmp = (u64)rx_ring->sbq_base_dma;
3217 base_indirect_ptr = rx_ring->sbq_base_indirect;
3220 *base_indirect_ptr = cpu_to_le64(tmp);
3221 tmp += DB_PAGE_SIZE;
3222 base_indirect_ptr++;
3224 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3226 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3227 cqicb->sbq_buf_size =
3228 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3229 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3230 (u16) rx_ring->sbq_len;
3231 cqicb->sbq_len = cpu_to_le16(bq_len);
3232 rx_ring->sbq_prod_idx = 0;
3233 rx_ring->sbq_curr_idx = 0;
3234 rx_ring->sbq_clean_idx = 0;
3235 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3237 switch (rx_ring->type) {
3239 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3240 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3243 /* Inbound completion handling rx_rings run in
3244 * separate NAPI contexts.
3246 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3248 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3249 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3252 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3253 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3255 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3256 CFG_LCQ, rx_ring->cq_id);
3258 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3264 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3266 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3267 void __iomem *doorbell_area =
3268 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3269 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3270 (tx_ring->wq_id * sizeof(u64));
3271 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3272 (tx_ring->wq_id * sizeof(u64));
3276 * Assign doorbell registers for this tx_ring.
3278 /* TX PCI doorbell mem area for tx producer index */
3279 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3280 tx_ring->prod_idx = 0;
3281 /* TX PCI doorbell mem area + 0x04 */
3282 tx_ring->valid_db_reg = doorbell_area + 0x04;
3285 * Assign shadow registers for this tx_ring.
3287 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3288 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3290 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3291 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3292 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3293 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3295 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3297 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3299 ql_init_tx_ring(qdev, tx_ring);
3301 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3302 (u16) tx_ring->wq_id);
3304 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3310 static void ql_disable_msix(struct ql_adapter *qdev)
3312 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3313 pci_disable_msix(qdev->pdev);
3314 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3315 kfree(qdev->msi_x_entry);
3316 qdev->msi_x_entry = NULL;
3317 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3318 pci_disable_msi(qdev->pdev);
3319 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3323 /* We start by trying to get the number of vectors
3324 * stored in qdev->intr_count. If we don't get that
3325 * many then we reduce the count and try again.
3327 static void ql_enable_msix(struct ql_adapter *qdev)
3331 /* Get the MSIX vectors. */
3332 if (qlge_irq_type == MSIX_IRQ) {
3333 /* Try to alloc space for the msix struct,
3334 * if it fails then go to MSI/legacy.
3336 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3337 sizeof(struct msix_entry),
3339 if (!qdev->msi_x_entry) {
3340 qlge_irq_type = MSI_IRQ;
3344 for (i = 0; i < qdev->intr_count; i++)
3345 qdev->msi_x_entry[i].entry = i;
3347 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3348 1, qdev->intr_count);
3350 kfree(qdev->msi_x_entry);
3351 qdev->msi_x_entry = NULL;
3352 netif_warn(qdev, ifup, qdev->ndev,
3353 "MSI-X Enable failed, trying MSI.\n");
3354 qlge_irq_type = MSI_IRQ;
3356 qdev->intr_count = err;
3357 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3358 netif_info(qdev, ifup, qdev->ndev,
3359 "MSI-X Enabled, got %d vectors.\n",
3365 qdev->intr_count = 1;
3366 if (qlge_irq_type == MSI_IRQ) {
3367 if (!pci_enable_msi(qdev->pdev)) {
3368 set_bit(QL_MSI_ENABLED, &qdev->flags);
3369 netif_info(qdev, ifup, qdev->ndev,
3370 "Running with MSI interrupts.\n");
3374 qlge_irq_type = LEG_IRQ;
3375 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3376 "Running with legacy interrupts.\n");
3379 /* Each vector services 1 RSS ring and 1 or more
3380 * TX completion rings. This function loops through
3381 * the TX completion rings and assigns the vector that
3382 * will service it. An example would be if there are
3383 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3384 * This would mean that vector 0 would service RSS ring 0
3385 * and TX completion rings 0,1,2 and 3. Vector 1 would
3386 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3388 static void ql_set_tx_vect(struct ql_adapter *qdev)
3391 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3393 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3394 /* Assign irq vectors to TX rx_rings.*/
3395 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3396 i < qdev->rx_ring_count; i++) {
3397 if (j == tx_rings_per_vector) {
3401 qdev->rx_ring[i].irq = vect;
3405 /* For single vector all rings have an irq
3408 for (i = 0; i < qdev->rx_ring_count; i++)
3409 qdev->rx_ring[i].irq = 0;
3413 /* Set the interrupt mask for this vector. Each vector
3414 * will service 1 RSS ring and 1 or more TX completion
3415 * rings. This function sets up a bit mask per vector
3416 * that indicates which rings it services.
3418 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3420 int j, vect = ctx->intr;
3421 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3423 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3424 /* Add the RSS ring serviced by this vector
3427 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3428 /* Add the TX ring(s) serviced by this vector
3430 for (j = 0; j < tx_rings_per_vector; j++) {
3432 (1 << qdev->rx_ring[qdev->rss_ring_count +
3433 (vect * tx_rings_per_vector) + j].cq_id);
3436 /* For single vector we just shift each queue's
3439 for (j = 0; j < qdev->rx_ring_count; j++)
3440 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3445 * Here we build the intr_context structures based on
3446 * our rx_ring count and intr vector count.
3447 * The intr_context structure is used to hook each vector
3448 * to possibly different handlers.
3450 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3453 struct intr_context *intr_context = &qdev->intr_context[0];
3455 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3456 /* Each rx_ring has its
3457 * own intr_context since we have separate
3458 * vectors for each queue.
3460 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3461 qdev->rx_ring[i].irq = i;
3462 intr_context->intr = i;
3463 intr_context->qdev = qdev;
3464 /* Set up this vector's bit-mask that indicates
3465 * which queues it services.
3467 ql_set_irq_mask(qdev, intr_context);
3469 * We set up each vector's enable/disable/read bits so
3470 * there are no bit/mask calculations in the critical path.
3472 intr_context->intr_en_mask =
3473 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3474 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3476 intr_context->intr_dis_mask =
3477 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3478 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3480 intr_context->intr_read_mask =
3481 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3482 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3485 /* The first vector/queue handles
3486 * broadcast/multicast, fatal errors,
3487 * and firmware events. This is in addition
3488 * to normal inbound NAPI processing.
3490 intr_context->handler = qlge_isr;
3491 sprintf(intr_context->name, "%s-rx-%d",
3492 qdev->ndev->name, i);
3495 * Inbound queues handle unicast frames only.
3497 intr_context->handler = qlge_msix_rx_isr;
3498 sprintf(intr_context->name, "%s-rx-%d",
3499 qdev->ndev->name, i);
3504 * All rx_rings use the same intr_context since
3505 * there is only one vector.
3507 intr_context->intr = 0;
3508 intr_context->qdev = qdev;
3510 * We set up each vector's enable/disable/read bits so
3511 * there are no bit/mask calculations in the critical path.
3513 intr_context->intr_en_mask =
3514 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3515 intr_context->intr_dis_mask =
3516 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3517 INTR_EN_TYPE_DISABLE;
3518 intr_context->intr_read_mask =
3519 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3521 * Single interrupt means one handler for all rings.
3523 intr_context->handler = qlge_isr;
3524 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3525 /* Set up this vector's bit-mask that indicates
3526 * which queues it services. In this case there is
3527 * a single vector so it will service all RSS and
3528 * TX completion rings.
3530 ql_set_irq_mask(qdev, intr_context);
3532 /* Tell the TX completion rings which MSIx vector
3533 * they will be using.
3535 ql_set_tx_vect(qdev);
3538 static void ql_free_irq(struct ql_adapter *qdev)
3541 struct intr_context *intr_context = &qdev->intr_context[0];
3543 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3544 if (intr_context->hooked) {
3545 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3546 free_irq(qdev->msi_x_entry[i].vector,
3549 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3553 ql_disable_msix(qdev);
3556 static int ql_request_irq(struct ql_adapter *qdev)
3560 struct pci_dev *pdev = qdev->pdev;
3561 struct intr_context *intr_context = &qdev->intr_context[0];
3563 ql_resolve_queues_to_irqs(qdev);
3565 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3566 atomic_set(&intr_context->irq_cnt, 0);
3567 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3568 status = request_irq(qdev->msi_x_entry[i].vector,
3569 intr_context->handler,
3574 netif_err(qdev, ifup, qdev->ndev,
3575 "Failed request for MSIX interrupt %d.\n",
3580 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3581 "trying msi or legacy interrupts.\n");
3582 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3583 "%s: irq = %d.\n", __func__, pdev->irq);
3584 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3585 "%s: context->name = %s.\n", __func__,
3586 intr_context->name);
3587 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3588 "%s: dev_id = 0x%p.\n", __func__,
3591 request_irq(pdev->irq, qlge_isr,
3592 test_bit(QL_MSI_ENABLED,
3594 flags) ? 0 : IRQF_SHARED,
3595 intr_context->name, &qdev->rx_ring[0]);
3599 netif_err(qdev, ifup, qdev->ndev,
3600 "Hooked intr %d, queue type %s, with name %s.\n",
3602 qdev->rx_ring[0].type == DEFAULT_Q ?
3604 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3605 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3606 intr_context->name);
3608 intr_context->hooked = 1;
3612 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3617 static int ql_start_rss(struct ql_adapter *qdev)
3619 static const u8 init_hash_seed[] = {
3620 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3621 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3622 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3623 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3624 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3626 struct ricb *ricb = &qdev->ricb;
3629 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3631 memset((void *)ricb, 0, sizeof(*ricb));
3633 ricb->base_cq = RSS_L4K;
3635 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3636 ricb->mask = cpu_to_le16((u16)(0x3ff));
3639 * Fill out the Indirection Table.
3641 for (i = 0; i < 1024; i++)
3642 hash_id[i] = (i & (qdev->rss_ring_count - 1));
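	/* The 1024-entry indirection table maps the RSS hash result to a
	 * completion queue; the mask only spreads traffic evenly when
	 * rss_ring_count is a power of two.
	 */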
3644 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3645 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3647 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3649 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3655 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3659 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3662 /* Clear all the entries in the routing table. */
3663 for (i = 0; i < 16; i++) {
3664 status = ql_set_routing_reg(qdev, i, 0, 0);
3666 netif_err(qdev, ifup, qdev->ndev,
3667 "Failed to init routing register for CAM packets.\n");
3671 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3675 /* Initialize the frame-to-queue routing. */
3676 static int ql_route_initialize(struct ql_adapter *qdev)
3680 /* Clear all the entries in the routing table. */
3681 status = ql_clear_routing_entries(qdev);
3685 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3689 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3690 RT_IDX_IP_CSUM_ERR, 1);
3692 netif_err(qdev, ifup, qdev->ndev,
3693 "Failed to init routing register "
3694 "for IP CSUM error packets.\n");
3697 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3698 RT_IDX_TU_CSUM_ERR, 1);
3700 netif_err(qdev, ifup, qdev->ndev,
3701 "Failed to init routing register "
3702 "for TCP/UDP CSUM error packets.\n");
3705 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3707 netif_err(qdev, ifup, qdev->ndev,
3708 "Failed to init routing register for broadcast packets.\n");
3711 /* If we have more than one inbound queue, then turn on RSS in the routing block. */
3714 if (qdev->rss_ring_count > 1) {
3715 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3716 RT_IDX_RSS_MATCH, 1);
3718 netif_err(qdev, ifup, qdev->ndev,
3719 "Failed to init routing register for MATCH RSS packets.\n");
3724 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3727 netif_err(qdev, ifup, qdev->ndev,
3728 "Failed to init routing register for CAM packets.\n");
3730 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3734 int ql_cam_route_initialize(struct ql_adapter *qdev)
3738 /* Check if the link is up and use that to
3739 * determine whether we are setting or clearing
3740 * the MAC address in the CAM. */
3742 set = ql_read32(qdev, STS);
3743 set &= qdev->port_link_up;
3744 status = ql_set_mac_addr(qdev, set);
3746 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3750 status = ql_route_initialize(qdev);
3752 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3757 static int ql_adapter_initialize(struct ql_adapter *qdev)
3764 * Set up the System register to halt on errors.
3766 value = SYS_EFE | SYS_FAE;
3768 ql_write32(qdev, SYS, mask | value);
3770 /* Set the default queue, and VLAN behavior. */
3771 value = NIC_RCV_CFG_DFQ;
3772 mask = NIC_RCV_CFG_DFQ_MASK;
3773 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3774 value |= NIC_RCV_CFG_RV;
3775 mask |= (NIC_RCV_CFG_RV << 16);
3777 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3779 /* Set the MPI interrupt to enabled. */
3780 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
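	/* As with the other control registers written here, the upper 16
	 * bits act as a write-enable mask for the lower 16 bits, so only
	 * the PI bit is changed.
	 */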
3782 /* Enable the function, set pagesize, enable error checking. */
3783 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3784 FSC_EC | FSC_VM_PAGE_4K;
3785 value |= SPLT_SETTING;
3787 /* Set/clear header splitting. */
3788 mask = FSC_VM_PAGESIZE_MASK |
3789 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3790 ql_write32(qdev, FSC, mask | value);
3792 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3794 /* Set RX packet routing to use the port/PCI function on which the
3795 * packet arrived, in addition to the usual frame routing.
3796 * This is helpful on bonding where both interfaces can have
3797 * the same MAC address.
3799 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3800 /* Reroute all packets to our interface.
3801 * They may have been routed to MPI firmware due to WOL. */
3804 value = ql_read32(qdev, MGMT_RCV_CFG);
3805 value &= ~MGMT_RCV_CFG_RM;
3808 /* Sticky reg needs clearing due to WOL. */
3809 ql_write32(qdev, MGMT_RCV_CFG, mask);
3810 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3812 /* Default WOL is enabled on Mezz cards */
3813 if (qdev->pdev->subsystem_device == 0x0068 ||
3814 qdev->pdev->subsystem_device == 0x0180)
3815 qdev->wol = WAKE_MAGIC;
3817 /* Start up the rx queues. */
3818 for (i = 0; i < qdev->rx_ring_count; i++) {
3819 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3821 netif_err(qdev, ifup, qdev->ndev,
3822 "Failed to start rx ring[%d].\n", i);
3827 /* If there is more than one inbound completion queue
3828 * then download a RICB to configure RSS.
3830 if (qdev->rss_ring_count > 1) {
3831 status = ql_start_rss(qdev);
3833 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3838 /* Start up the tx queues. */
3839 for (i = 0; i < qdev->tx_ring_count; i++) {
3840 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3842 netif_err(qdev, ifup, qdev->ndev,
3843 "Failed to start tx ring[%d].\n", i);
3848 /* Initialize the port and set the max framesize. */
3849 status = qdev->nic_ops->port_initialize(qdev);
3851 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3853 /* Set up the MAC address and frame routing filter. */
3854 status = ql_cam_route_initialize(qdev);
3856 netif_err(qdev, ifup, qdev->ndev,
3857 "Failed to init CAM/Routing tables.\n");
3861 /* Start NAPI for the RSS queues. */
3862 for (i = 0; i < qdev->rss_ring_count; i++)
3863 napi_enable(&qdev->rx_ring[i].napi);
3868 /* Issue soft reset to chip. */
3869 static int ql_adapter_reset(struct ql_adapter *qdev)
3873 unsigned long end_jiffies;
3875 /* Clear all the entries in the routing table. */
3876 status = ql_clear_routing_entries(qdev);
3878 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3882 /* If the recovery bit is set, skip the mailbox command and
3883 * clear the bit; otherwise we are in the normal reset process. */
3885 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3886 /* Stop management traffic. */
3887 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3889 /* Wait for the NIC and MGMNT FIFOs to empty. */
3890 ql_wait_fifo_empty(qdev);
3892 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3894 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3896 end_jiffies = jiffies + usecs_to_jiffies(30);
3898 value = ql_read32(qdev, RST_FO);
3899 if ((value & RST_FO_FR) == 0)
3902 } while (time_before(jiffies, end_jiffies));
3904 if (value & RST_FO_FR) {
3905 netif_err(qdev, ifdown, qdev->ndev,
3906 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3907 status = -ETIMEDOUT;
3910 /* Resume management traffic. */
3911 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3915 static void ql_display_dev_info(struct net_device *ndev)
3917 struct ql_adapter *qdev = netdev_priv(ndev);
3919 netif_info(qdev, probe, qdev->ndev,
3920 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3921 "XG Roll = %d, XG Rev = %d.\n",
3924 qdev->chip_rev_id & 0x0000000f,
3925 qdev->chip_rev_id >> 4 & 0x0000000f,
3926 qdev->chip_rev_id >> 8 & 0x0000000f,
3927 qdev->chip_rev_id >> 12 & 0x0000000f);
3928 netif_info(qdev, probe, qdev->ndev,
3929 "MAC address %pM\n", ndev->dev_addr);
3932 static int ql_wol(struct ql_adapter *qdev)
3935 u32 wol = MB_WOL_DISABLE;
3937 /* The CAM is still intact after a reset, but if we
3938 * are doing WOL, then we may need to program the
3939 * routing regs. We would also need to issue the mailbox
3940 * commands to instruct the MPI what to do per the ethtool settings. */
3944 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3945 WAKE_MCAST | WAKE_BCAST)) {
3946 netif_err(qdev, ifdown, qdev->ndev,
3947 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3952 if (qdev->wol & WAKE_MAGIC) {
3953 status = ql_mb_wol_set_magic(qdev, 1);
3955 netif_err(qdev, ifdown, qdev->ndev,
3956 "Failed to set magic packet on %s.\n",
3960 netif_info(qdev, drv, qdev->ndev,
3961 "Enabled magic packet successfully on %s.\n",
3964 wol |= MB_WOL_MAGIC_PKT;
3968 wol |= MB_WOL_MODE_ON;
3969 status = ql_mb_wol_mode(qdev, wol);
3970 netif_err(qdev, drv, qdev->ndev,
3971 "WOL %s (wol code 0x%x) on %s\n",
3972 (status == 0) ? "Successfully set" : "Failed",
3973 wol, qdev->ndev->name);
3979 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3982 /* Don't kill the reset worker thread if we
3983 * are in the process of recovery.
3985 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3986 cancel_delayed_work_sync(&qdev->asic_reset_work);
3987 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3988 cancel_delayed_work_sync(&qdev->mpi_work);
3989 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3990 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3991 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3994 static int ql_adapter_down(struct ql_adapter *qdev)
4000 ql_cancel_all_work_sync(qdev);
4002 for (i = 0; i < qdev->rss_ring_count; i++)
4003 napi_disable(&qdev->rx_ring[i].napi);
4005 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4007 ql_disable_interrupts(qdev);
4009 ql_tx_ring_clean(qdev);
4011 /* Call netif_napi_del() from common point.
4013 for (i = 0; i < qdev->rss_ring_count; i++)
4014 netif_napi_del(&qdev->rx_ring[i].napi);
4016 status = ql_adapter_reset(qdev);
4018 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4020 ql_free_rx_buffers(qdev);
4025 static int ql_adapter_up(struct ql_adapter *qdev)
4029 err = ql_adapter_initialize(qdev);
4031 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4034 set_bit(QL_ADAPTER_UP, &qdev->flags);
4035 ql_alloc_rx_buffers(qdev);
4036 /* If the port is initialized and the
4037 * link is up then turn on the carrier. */
4039 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4040 (ql_read32(qdev, STS) & qdev->port_link_up))
4042 /* Restore rx mode. */
4043 clear_bit(QL_ALLMULTI, &qdev->flags);
4044 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4045 qlge_set_multicast_list(qdev->ndev);
4047 /* Restore vlan setting. */
4048 qlge_restore_vlan(qdev);
4050 ql_enable_interrupts(qdev);
4051 ql_enable_all_completion_interrupts(qdev);
4052 netif_tx_start_all_queues(qdev->ndev);
4056 ql_adapter_reset(qdev);
4060 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4062 ql_free_mem_resources(qdev);
4066 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4070 if (ql_alloc_mem_resources(qdev)) {
4071 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4074 status = ql_request_irq(qdev);
4078 static int qlge_close(struct net_device *ndev)
4080 struct ql_adapter *qdev = netdev_priv(ndev);
4082 /* If we hit the pci_channel_io_perm_failure
4083 * condition, then we have already
4084 * brought the adapter down. */
4086 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4087 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4088 clear_bit(QL_EEH_FATAL, &qdev->flags);
4093 * Wait for device to recover from a reset.
4094 * (Rarely happens, but possible.)
4096 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4098 ql_adapter_down(qdev);
4099 ql_release_adapter_resources(qdev);
4103 static int ql_configure_rings(struct ql_adapter *qdev)
4106 struct rx_ring *rx_ring;
4107 struct tx_ring *tx_ring;
4108 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4109 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4110 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4112 qdev->lbq_buf_order = get_order(lbq_buf_len);
4114 /* In a perfect world we have one RSS ring for each CPU
4115 * and each has its own vector. To do that we ask for
4116 * cpu_cnt vectors. ql_enable_msix() will adjust the
4117 * vector count to what we actually get. We then
4118 * allocate an RSS ring for each.
4119 * Essentially, we are doing min(cpu_count, msix_vector_count).
4121 qdev->intr_count = cpu_cnt;
4122 ql_enable_msix(qdev);
4123 /* Adjust the RSS ring count to the actual vector count. */
4124 qdev->rss_ring_count = qdev->intr_count;
4125 qdev->tx_ring_count = cpu_cnt;
4126 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
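	/* Resulting rx_ring[] layout: entries 0..rss_ring_count-1 are the
	 * inbound (RSS) rings, followed by one outbound-completion ring per
	 * TX ring.  With 4 vectors, for example, rings 0-3 are RSS queues
	 * and rings 4-7 service TX completions.
	 */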
4128 for (i = 0; i < qdev->tx_ring_count; i++) {
4129 tx_ring = &qdev->tx_ring[i];
4130 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4131 tx_ring->qdev = qdev;
4133 tx_ring->wq_len = qdev->tx_ring_size;
4135 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4138 * The completion queue IDs for the tx rings start
4139 * immediately after the rss rings. */
4141 tx_ring->cq_id = qdev->rss_ring_count + i;
4144 for (i = 0; i < qdev->rx_ring_count; i++) {
4145 rx_ring = &qdev->rx_ring[i];
4146 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4147 rx_ring->qdev = qdev;
4149 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4150 if (i < qdev->rss_ring_count) {
4152 * Inbound (RSS) queues.
4154 rx_ring->cq_len = qdev->rx_ring_size;
4156 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4157 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4159 rx_ring->lbq_len * sizeof(__le64);
4160 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4161 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4163 rx_ring->sbq_len * sizeof(__le64);
4164 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
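			/* Inbound rings use two buffer queues: small buffers
			 * for headers and small frames, and page-chunk large
			 * buffers for the rest of the payload (header splitting
			 * is turned on via SPLT_HDR in ql_adapter_initialize()).
			 */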
4165 rx_ring->type = RX_Q;
4168 * Outbound queue handles outbound completions only.
4170 /* The outbound cq is the same size as the tx_ring it services. */
4171 rx_ring->cq_len = qdev->tx_ring_size;
4173 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4174 rx_ring->lbq_len = 0;
4175 rx_ring->lbq_size = 0;
4176 rx_ring->lbq_buf_size = 0;
4177 rx_ring->sbq_len = 0;
4178 rx_ring->sbq_size = 0;
4179 rx_ring->sbq_buf_size = 0;
4180 rx_ring->type = TX_Q;
4186 static int qlge_open(struct net_device *ndev)
4189 struct ql_adapter *qdev = netdev_priv(ndev);
4191 err = ql_adapter_reset(qdev);
4195 err = ql_configure_rings(qdev);
4199 err = ql_get_adapter_resources(qdev);
4203 err = ql_adapter_up(qdev);
4210 ql_release_adapter_resources(qdev);
4214 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4216 struct rx_ring *rx_ring;
4220 /* Wait for an outstanding reset to complete. */
4221 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4224 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4225 netif_err(qdev, ifup, qdev->ndev,
4226 "Waiting for adapter UP...\n");
4231 netif_err(qdev, ifup, qdev->ndev,
4232 "Timed out waiting for adapter UP\n");
4237 status = ql_adapter_down(qdev);
4241 /* Get the new rx buffer size. */
4242 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4243 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4244 qdev->lbq_buf_order = get_order(lbq_buf_len);
4246 for (i = 0; i < qdev->rss_ring_count; i++) {
4247 rx_ring = &qdev->rx_ring[i];
4248 /* Set the new size. */
4249 rx_ring->lbq_buf_size = lbq_buf_len;
4252 status = ql_adapter_up(qdev);
4258 netif_alert(qdev, ifup, qdev->ndev,
4259 "Driver up/down cycle failed, closing device.\n");
4260 set_bit(QL_ADAPTER_UP, &qdev->flags);
4261 dev_close(qdev->ndev);
4265 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4267 struct ql_adapter *qdev = netdev_priv(ndev);
4270 if (ndev->mtu == 1500 && new_mtu == 9000) {
4271 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4272 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4273 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4277 queue_delayed_work(qdev->workqueue,
4278 &qdev->mpi_port_cfg_work, 3*HZ);
4280 ndev->mtu = new_mtu;
4282 if (!netif_running(qdev->ndev)) {
4286 status = ql_change_rx_buffers(qdev);
4288 netif_err(qdev, ifup, qdev->ndev,
4289 "Changing MTU failed.\n");
4295 static struct net_device_stats *qlge_get_stats(struct net_device
4298 struct ql_adapter *qdev = netdev_priv(ndev);
4299 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4300 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4301 unsigned long pkts, mcast, dropped, errors, bytes;
4305 pkts = mcast = dropped = errors = bytes = 0;
4306 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4307 pkts += rx_ring->rx_packets;
4308 bytes += rx_ring->rx_bytes;
4309 dropped += rx_ring->rx_dropped;
4310 errors += rx_ring->rx_errors;
4311 mcast += rx_ring->rx_multicast;
4313 ndev->stats.rx_packets = pkts;
4314 ndev->stats.rx_bytes = bytes;
4315 ndev->stats.rx_dropped = dropped;
4316 ndev->stats.rx_errors = errors;
4317 ndev->stats.multicast = mcast;
4320 pkts = errors = bytes = 0;
4321 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4322 pkts += tx_ring->tx_packets;
4323 bytes += tx_ring->tx_bytes;
4324 errors += tx_ring->tx_errors;
4326 ndev->stats.tx_packets = pkts;
4327 ndev->stats.tx_bytes = bytes;
4328 ndev->stats.tx_errors = errors;
4329 return &ndev->stats;
4332 static void qlge_set_multicast_list(struct net_device *ndev)
4334 struct ql_adapter *qdev = netdev_priv(ndev);
4335 struct netdev_hw_addr *ha;
4338 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4342 * Set or clear promiscuous mode if a
4343 * transition is taking place.
4345 if (ndev->flags & IFF_PROMISC) {
4346 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4347 if (ql_set_routing_reg
4348 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4349 netif_err(qdev, hw, qdev->ndev,
4350 "Failed to set promiscuous mode.\n");
4352 set_bit(QL_PROMISCUOUS, &qdev->flags);
4356 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4357 if (ql_set_routing_reg
4358 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4359 netif_err(qdev, hw, qdev->ndev,
4360 "Failed to clear promiscuous mode.\n");
4362 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4368 * Set or clear all multicast mode if a
4369 * transition is taking place.
4371 if ((ndev->flags & IFF_ALLMULTI) ||
4372 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4373 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4374 if (ql_set_routing_reg
4375 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4376 netif_err(qdev, hw, qdev->ndev,
4377 "Failed to set all-multi mode.\n");
4379 set_bit(QL_ALLMULTI, &qdev->flags);
4383 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4384 if (ql_set_routing_reg
4385 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4386 netif_err(qdev, hw, qdev->ndev,
4387 "Failed to clear all-multi mode.\n");
4389 clear_bit(QL_ALLMULTI, &qdev->flags);
4394 if (!netdev_mc_empty(ndev)) {
4395 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4399 netdev_for_each_mc_addr(ha, ndev) {
4400 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4401 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4402 netif_err(qdev, hw, qdev->ndev,
4403 "Failed to loadmulticast address.\n");
4404 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4409 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4410 if (ql_set_routing_reg
4411 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4412 netif_err(qdev, hw, qdev->ndev,
4413 "Failed to set multicast match mode.\n");
4415 set_bit(QL_ALLMULTI, &qdev->flags);
4419 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4422 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4424 struct ql_adapter *qdev = netdev_priv(ndev);
4425 struct sockaddr *addr = p;
4428 if (!is_valid_ether_addr(addr->sa_data))
4429 return -EADDRNOTAVAIL;
4430 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4431 /* Update local copy of current mac address. */
4432 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4434 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4437 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4438 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4440 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4441 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4445 static void qlge_tx_timeout(struct net_device *ndev)
4447 struct ql_adapter *qdev = netdev_priv(ndev);
4448 ql_queue_asic_error(qdev);
4451 static void ql_asic_reset_work(struct work_struct *work)
4453 struct ql_adapter *qdev =
4454 container_of(work, struct ql_adapter, asic_reset_work.work);
4457 status = ql_adapter_down(qdev);
4461 status = ql_adapter_up(qdev);
4465 /* Restore rx mode. */
4466 clear_bit(QL_ALLMULTI, &qdev->flags);
4467 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4468 qlge_set_multicast_list(qdev->ndev);
4473 netif_alert(qdev, ifup, qdev->ndev,
4474 "Driver up/down cycle failed, closing device\n");
4476 set_bit(QL_ADAPTER_UP, &qdev->flags);
4477 dev_close(qdev->ndev);
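/* Chip-specific hooks: each supported device ID gets its own
 * nic_operations table for reading flash parameters and initializing
 * the port (the 8012 and 8000 evidently differ in both); the table is
 * selected in ql_get_board_info().
 */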
4481 static const struct nic_operations qla8012_nic_ops = {
4482 .get_flash = ql_get_8012_flash_params,
4483 .port_initialize = ql_8012_port_initialize,
4486 static const struct nic_operations qla8000_nic_ops = {
4487 .get_flash = ql_get_8000_flash_params,
4488 .port_initialize = ql_8000_port_initialize,
4491 /* Find the pcie function number for the other NIC
4492 * on this chip. Since both NIC functions share a
4493 * common firmware we have the lowest enabled function
4494 * do any common work. Examples would be resetting
4495 * after a fatal firmware error, or doing a firmware
4496 * coredump.
4498 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4502 u32 nic_func1, nic_func2;
4504 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4505 &temp);
4509 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4510 MPI_TEST_NIC_FUNC_MASK);
4511 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4512 MPI_TEST_NIC_FUNC_MASK);
4514 if (qdev->func == nic_func1)
4515 qdev->alt_func = nic_func2;
4516 else if (qdev->func == nic_func2)
4517 qdev->alt_func = nic_func1;
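/* Work out this function's identity: read the function id from the
 * status register, find the alternate NIC function, and derive the
 * port number, XGMAC semaphore mask, link/init status bits, mailbox
 * addresses, chip revision and nic_ops table from it.
 */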
4524 static int ql_get_board_info(struct ql_adapter *qdev)
4527 qdev->func =
4528 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4532 status = ql_get_alt_pcie_func(qdev);
4536 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4537 if (qdev->port) {
4538 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4539 qdev->port_link_up = STS_PL1;
4540 qdev->port_init = STS_PI1;
4541 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4542 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4543 } else {
4544 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4545 qdev->port_link_up = STS_PL0;
4546 qdev->port_init = STS_PI0;
4547 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4548 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4550 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4551 qdev->device_id = qdev->pdev->device;
4552 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4553 qdev->nic_ops = &qla8012_nic_ops;
4554 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4555 qdev->nic_ops = &qla8000_nic_ops;
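/* Release everything ql_init_device() acquired: the workqueue, the
 * register and doorbell mappings, the optional MPI coredump buffer
 * and the PCI regions.
 */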
4559 static void ql_release_all(struct pci_dev *pdev)
4561 struct net_device *ndev = pci_get_drvdata(pdev);
4562 struct ql_adapter *qdev = netdev_priv(ndev);
4564 if (qdev->workqueue) {
4565 destroy_workqueue(qdev->workqueue);
4566 qdev->workqueue = NULL;
4570 iounmap(qdev->reg_base);
4571 if (qdev->doorbell_area)
4572 iounmap(qdev->doorbell_area);
4573 vfree(qdev->mpi_coredump);
4574 pci_release_regions(pdev);
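/* One-time PCI and adapter setup at probe time: enable the device,
 * set the PCIe read request size and DMA mask, map the register and
 * doorbell BARs, read the board info, optionally allocate the MPI
 * coredump buffer, and seed the default ring sizes, coalescing
 * parameters and deferred-work items.
 */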
4577 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4580 struct ql_adapter *qdev = netdev_priv(ndev);
4583 memset((void *)qdev, 0, sizeof(*qdev));
4584 err = pci_enable_device(pdev);
4586 dev_err(&pdev->dev, "PCI device enable failed.\n");
4592 pci_set_drvdata(pdev, ndev);
4594 /* Set PCIe read request size */
4595 err = pcie_set_readrq(pdev, 4096);
4597 dev_err(&pdev->dev, "Set readrq failed.\n");
4601 err = pci_request_regions(pdev, DRV_NAME);
4603 dev_err(&pdev->dev, "PCI region request failed.\n");
4607 pci_set_master(pdev);
4608 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4609 set_bit(QL_DMA64, &qdev->flags);
4610 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4612 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4614 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4618 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4622 /* Set PCIe reset type for EEH to fundamental. */
4623 pdev->needs_freset = 1;
4624 pci_save_state(pdev);
4625 qdev->reg_base =
4626 ioremap_nocache(pci_resource_start(pdev, 1),
4627 pci_resource_len(pdev, 1));
4628 if (!qdev->reg_base) {
4629 dev_err(&pdev->dev, "Register mapping failed.\n");
4634 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4635 qdev->doorbell_area =
4636 ioremap_nocache(pci_resource_start(pdev, 3),
4637 pci_resource_len(pdev, 3));
4638 if (!qdev->doorbell_area) {
4639 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4644 err = ql_get_board_info(qdev);
4646 dev_err(&pdev->dev, "Register access failed.\n");
4650 qdev->msg_enable = netif_msg_init(debug, default_msg);
4651 spin_lock_init(&qdev->hw_lock);
4652 spin_lock_init(&qdev->stats_lock);
4654 if (qlge_mpi_coredump) {
4655 qdev->mpi_coredump =
4656 vmalloc(sizeof(struct ql_mpi_coredump));
4657 if (qdev->mpi_coredump == NULL) {
4661 if (qlge_force_coredump)
4662 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4664 /* make sure the EEPROM is good */
4665 err = qdev->nic_ops->get_flash(qdev);
4667 dev_err(&pdev->dev, "Invalid FLASH.\n");
4671 /* Keep local copy of current mac address. */
4672 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4674 /* Set up the default ring sizes. */
4675 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4676 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4678 /* Set up the coalescing parameters. */
4679 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4680 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4681 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4682 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4685 * Set up the operating parameters.
4687 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4688 ndev->name);
4689 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4690 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4691 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4692 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4693 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4694 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4695 init_completion(&qdev->ide_completion);
4696 mutex_init(&qdev->mpi_mutex);
4699 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4700 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4701 DRV_NAME, DRV_VERSION);
4705 ql_release_all(pdev);
4707 pci_disable_device(pdev);
4711 static const struct net_device_ops qlge_netdev_ops = {
4712 .ndo_open = qlge_open,
4713 .ndo_stop = qlge_close,
4714 .ndo_start_xmit = qlge_send,
4715 .ndo_change_mtu = qlge_change_mtu,
4716 .ndo_get_stats = qlge_get_stats,
4717 .ndo_set_rx_mode = qlge_set_multicast_list,
4718 .ndo_set_mac_address = qlge_set_mac_address,
4719 .ndo_validate_addr = eth_validate_addr,
4720 .ndo_tx_timeout = qlge_tx_timeout,
4721 .ndo_fix_features = qlge_fix_features,
4722 .ndo_set_features = qlge_set_features,
4723 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4724 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
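/* Deferrable watchdog: read the status register every five seconds so
 * a dead PCI channel is noticed (and EEH recovery can start) even
 * while the interface is idle.
 */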
4727 static void ql_timer(struct timer_list *t)
4729 struct ql_adapter *qdev = from_timer(qdev, t, timer);
4732 var = ql_read32(qdev, STS);
4733 if (pci_channel_offline(qdev->pdev)) {
4734 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4738 mod_timer(&qdev->timer, jiffies + (5*HZ));
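/* PCI probe: allocate a multi-queue net_device sized for the default
 * RSS queue count, run ql_init_device(), advertise the offload and
 * VLAN features, register the netdev and start the EEH watchdog timer.
 */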
4741 static int qlge_probe(struct pci_dev *pdev,
4742 const struct pci_device_id *pci_entry)
4744 struct net_device *ndev = NULL;
4745 struct ql_adapter *qdev = NULL;
4746 static int cards_found = 0;
4749 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4750 min(MAX_CPUS, netif_get_num_default_rss_queues()));
4754 err = ql_init_device(pdev, ndev, cards_found);
4760 qdev = netdev_priv(ndev);
4761 SET_NETDEV_DEV(ndev, &pdev->dev);
4762 ndev->hw_features = NETIF_F_SG |
4763 NETIF_F_IP_CSUM |
4764 NETIF_F_TSO |
4765 NETIF_F_TSO_ECN |
4766 NETIF_F_HW_VLAN_CTAG_TX |
4767 NETIF_F_HW_VLAN_CTAG_RX |
4768 NETIF_F_HW_VLAN_CTAG_FILTER |
4769 NETIF_F_RXCSUM;
4770 ndev->features = ndev->hw_features;
4771 ndev->vlan_features = ndev->hw_features;
4772 /* vlan gets same features (except vlan filter) */
4773 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4774 NETIF_F_HW_VLAN_CTAG_TX |
4775 NETIF_F_HW_VLAN_CTAG_RX);
4777 if (test_bit(QL_DMA64, &qdev->flags))
4778 ndev->features |= NETIF_F_HIGHDMA;
4781 * Set up net_device structure.
4783 ndev->tx_queue_len = qdev->tx_ring_size;
4784 ndev->irq = pdev->irq;
4786 ndev->netdev_ops = &qlge_netdev_ops;
4787 ndev->ethtool_ops = &qlge_ethtool_ops;
4788 ndev->watchdog_timeo = 10 * HZ;
4790 /* MTU range: this driver only supports 1500 or 9000, so this only
4791 * filters out values above or below, and we'll rely on
4792 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4794 ndev->min_mtu = ETH_DATA_LEN;
4795 ndev->max_mtu = 9000;
4797 err = register_netdev(ndev);
4799 dev_err(&pdev->dev, "net device registration failed.\n");
4800 ql_release_all(pdev);
4801 pci_disable_device(pdev);
4805 /* Start up the timer to trigger EEH if
4806 * the bundle is loaded but not enabled.
4808 timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4809 mod_timer(&qdev->timer, jiffies + (5*HZ));
4811 ql_display_dev_info(ndev);
4812 atomic_set(&qdev->lb_count, 0);
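/* Loopback helpers: thin wrappers around the normal send and rx-clean
 * paths, kept non-static for use by the ethtool self-test code.
 */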
4817 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4819 return qlge_send(skb, ndev);
4822 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4824 return ql_clean_inbound_rx_ring(rx_ring, budget);
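/* PCI remove: stop the watchdog timer and all deferred work,
 * unregister the netdev and undo ql_init_device().
 */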
4827 static void qlge_remove(struct pci_dev *pdev)
4829 struct net_device *ndev = pci_get_drvdata(pdev);
4830 struct ql_adapter *qdev = netdev_priv(ndev);
4831 del_timer_sync(&qdev->timer);
4832 ql_cancel_all_work_sync(qdev);
4833 unregister_netdev(ndev);
4834 ql_release_all(pdev);
4835 pci_disable_device(pdev);
4839 /* Clean up resources without touching hardware. */
4840 static void ql_eeh_close(struct net_device *ndev)
4843 struct ql_adapter *qdev = netdev_priv(ndev);
4845 if (netif_carrier_ok(ndev)) {
4846 netif_carrier_off(ndev);
4847 netif_stop_queue(ndev);
4850 /* Disabling the timer */
4851 ql_cancel_all_work_sync(qdev);
4853 for (i = 0; i < qdev->rss_ring_count; i++)
4854 netif_napi_del(&qdev->rx_ring[i].napi);
4856 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4857 ql_tx_ring_clean(qdev);
4858 ql_free_rx_buffers(qdev);
4859 ql_release_adapter_resources(qdev);
4863 * This callback is called by the PCI subsystem whenever
4864 * a PCI bus error is detected.
4866 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4867 enum pci_channel_state state)
4869 struct net_device *ndev = pci_get_drvdata(pdev);
4870 struct ql_adapter *qdev = netdev_priv(ndev);
4873 case pci_channel_io_normal:
4874 return PCI_ERS_RESULT_CAN_RECOVER;
4875 case pci_channel_io_frozen:
4876 netif_device_detach(ndev);
4877 del_timer_sync(&qdev->timer);
4878 if (netif_running(ndev))
4879 ql_eeh_close(ndev);
4880 pci_disable_device(pdev);
4881 return PCI_ERS_RESULT_NEED_RESET;
4882 case pci_channel_io_perm_failure:
4884 "%s: pci_channel_io_perm_failure.\n", __func__);
4885 del_timer_sync(&qdev->timer);
4887 set_bit(QL_EEH_FATAL, &qdev->flags);
4888 return PCI_ERS_RESULT_DISCONNECT;
4891 /* Request a slot reset. */
4892 return PCI_ERS_RESULT_NEED_RESET;
4896 * This callback is called after the PCI bus has been reset.
4897 * Basically, this tries to restart the card from scratch.
4898 * This is a shortened version of the device probe/discovery code,
4899 * it resembles the first half of the probe routine.
4901 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4903 struct net_device *ndev = pci_get_drvdata(pdev);
4904 struct ql_adapter *qdev = netdev_priv(ndev);
4906 pdev->error_state = pci_channel_io_normal;
4908 pci_restore_state(pdev);
4909 if (pci_enable_device(pdev)) {
4910 netif_err(qdev, ifup, qdev->ndev,
4911 "Cannot re-enable PCI device after reset.\n");
4912 return PCI_ERS_RESULT_DISCONNECT;
4914 pci_set_master(pdev);
4916 if (ql_adapter_reset(qdev)) {
4917 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4918 set_bit(QL_EEH_FATAL, &qdev->flags);
4919 return PCI_ERS_RESULT_DISCONNECT;
4922 return PCI_ERS_RESULT_RECOVERED;
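/* Final stage of EEH recovery: reopen the interface if it was running
 * before the error, restart the watchdog timer and re-attach the
 * net device.
 */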
4925 static void qlge_io_resume(struct pci_dev *pdev)
4927 struct net_device *ndev = pci_get_drvdata(pdev);
4928 struct ql_adapter *qdev = netdev_priv(ndev);
4931 if (netif_running(ndev)) {
4932 err = qlge_open(ndev);
4934 netif_err(qdev, ifup, qdev->ndev,
4935 "Device initialization failed after reset.\n");
4939 netif_err(qdev, ifup, qdev->ndev,
4940 "Device was not running prior to EEH.\n");
4942 mod_timer(&qdev->timer, jiffies + (5*HZ));
4943 netif_device_attach(ndev);
4946 static const struct pci_error_handlers qlge_err_handler = {
4947 .error_detected = qlge_io_error_detected,
4948 .slot_reset = qlge_io_slot_reset,
4949 .resume = qlge_io_resume,
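/* Legacy PCI power-management hooks: suspend detaches the interface,
 * brings the adapter down if it was running, saves PCI state and
 * powers the device down; resume reverses the sequence and re-attaches
 * the interface.
 */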
4952 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4954 struct net_device *ndev = pci_get_drvdata(pdev);
4955 struct ql_adapter *qdev = netdev_priv(ndev);
4958 netif_device_detach(ndev);
4959 del_timer_sync(&qdev->timer);
4961 if (netif_running(ndev)) {
4962 err = ql_adapter_down(qdev);
4968 err = pci_save_state(pdev);
4972 pci_disable_device(pdev);
4974 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4980 static int qlge_resume(struct pci_dev *pdev)
4982 struct net_device *ndev = pci_get_drvdata(pdev);
4983 struct ql_adapter *qdev = netdev_priv(ndev);
4986 pci_set_power_state(pdev, PCI_D0);
4987 pci_restore_state(pdev);
4988 err = pci_enable_device(pdev);
4990 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4993 pci_set_master(pdev);
4995 pci_enable_wake(pdev, PCI_D3hot, 0);
4996 pci_enable_wake(pdev, PCI_D3cold, 0);
4998 if (netif_running(ndev)) {
4999 err = ql_adapter_up(qdev);
5004 mod_timer(&qdev->timer, jiffies + (5*HZ));
5005 netif_device_attach(ndev);
5009 #endif /* CONFIG_PM */
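/* Shutdown reuses the suspend path to quiesce the hardware. */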
5011 static void qlge_shutdown(struct pci_dev *pdev)
5013 qlge_suspend(pdev, PMSG_SUSPEND);
5016 static struct pci_driver qlge_driver = {
5018 .id_table = qlge_pci_tbl,
5019 .probe = qlge_probe,
5020 .remove = qlge_remove,
5022 .suspend = qlge_suspend,
5023 .resume = qlge_resume,
5025 .shutdown = qlge_shutdown,
5026 .err_handler = &qlge_err_handler
5029 module_pci_driver(qlge_driver);