// SPDX-License-Identifier: GPL-2.0
/* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
 *
 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
 *
 * References:
 *  o IOC3 ASIC specification 4.51, 1996-04-18
 *  o IEEE 802.3 specification, 2000 edition
 *  o DP83840A Specification, National Semiconductor, March 1997
 *
 * To do:
 *
 *  o Use prefetching for large packets.  What is a good lower limit for
 *    prefetching?
 *  o Use hardware checksums.
 *  o Convert to using an IOC3 meta driver.
 *  o Which PHYs might possibly be attached to the IOC3 in real life,
 *    which workarounds are required for them?  Do we ever have Lucent's?
 *  o For the 2.5 branch kill the mii-tool ioctls.
 */

#define IOC3_NAME	"ioc3-eth"
#define IOC3_VERSION	"2.6.3-4"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/gfp.h>

#ifdef CONFIG_SERIAL_8250
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#endif

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

#include <net/ip.h>

#include <asm/byteorder.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
/* Number of RX buffers.  This is tunable in the range of 16 <= x < 512.
 * The value must be a power of two.
 */
#define RX_BUFFS		64
#define RX_RING_ENTRIES		512		/* fixed in hardware */
#define RX_RING_MASK		(RX_RING_ENTRIES - 1)
#define RX_RING_SIZE		(RX_RING_ENTRIES * sizeof(u64))

/* 128 TX buffers (not tunable) */
#define TX_RING_ENTRIES		128
#define TX_RING_MASK		(TX_RING_ENTRIES - 1)
#define TX_RING_SIZE		(TX_RING_ENTRIES * sizeof(struct ioc3_etxd))
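
/* Because the ring sizes are powers of two, ring indices can wrap with a
 * cheap AND against the ring mask instead of a modulo.  A minimal sketch
 * of the idiom used throughout this driver (illustrative only, not part
 * of the original source):
 */
#if 0
static int ring_advance(int index)
{
	/* 0 -> 1, ..., TX_RING_ENTRIES - 1 -> 0 */
	return (index + 1) & TX_RING_MASK;
}
#endif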
/* IOC3 does dma transfers in 128 byte blocks */
#define IOC3_DMA_XFER_LEN	128UL

/* Every RX buffer starts with 8 byte descriptor data */
#define RX_OFFSET		(sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN)
#define RX_BUF_SIZE		(13 * IOC3_DMA_XFER_LEN)
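
/* Worked out, each receive buffer is 13 * 128 = 1664 bytes; the first
 * RX_OFFSET bytes hold the descriptor data the chip writes ahead of the
 * frame (plus NET_IP_ALIGN padding), which still leaves room for a
 * maximum-size Ethernet frame including the trailing FCS.  (Explanatory
 * note, derived from the definitions above.)
 */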
#define ETCSR_FD	((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD	((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21)
/* Private per NIC data of the driver. */
struct ioc3_private {
	struct ioc3_ethregs *regs;
	struct ioc3 *all_regs;
	struct device *dma_dev;
	u32 *ssram;
	unsigned long *rxr;	/* pointer to receiver ring */
	dma_addr_t rxr_dma;
	struct ioc3_etxd *txr;
	dma_addr_t txr_dma;
	struct ioc3_etxd *tx_ring;
	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
	struct sk_buff *tx_skbs[TX_RING_ENTRIES];
	int rx_ci;		/* RX consumer index */
	int rx_pi;		/* RX producer index */
	int tx_ci;		/* TX consumer index */
	int tx_pi;		/* TX producer index */
	int txqlen;
	u32 emcr, ehar_h, ehar_l;
	spinlock_t ioc3_lock;
	struct mii_if_info mii;

	struct net_device *dev;
	struct pci_dev *pdev;

	/* Members used by autonegotiation */
	struct timer_list ioc3_timer;
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);
static int ioc3_alloc_rx_bufs(struct net_device *dev);
static void ioc3_free_rx_bufs(struct ioc3_private *ip);
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);

static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
	/* Distance from addr up to the next IOC3_DMA_XFER_LEN boundary:
	 * two's complement of addr, masked to the low alignment bits.
	 */
	return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
}
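
/* For example, addr == 0x1234 gives (~0x1234 + 1) & 0x7f == 0x4c, and
 * 0x1234 + 0x4c == 0x1280, which is 128-byte aligned; already-aligned
 * addresses yield 0.  (Illustrative arithmetic.)
 */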
static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
				 struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma)
{
	struct sk_buff *new_skb;
	dma_addr_t d;
	int offset;

	new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC);
	if (!new_skb)
		return -ENOMEM;

	/* ensure buffer is aligned to IOC3_DMA_XFER_LEN */
	offset = aligned_rx_skb_addr((unsigned long)new_skb->data);
	if (offset)
		skb_reserve(new_skb, offset);

	d = dma_map_single(ip->dma_dev, new_skb->data,
			   RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(ip->dma_dev, d)) {
		dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}
	*rxb_dma = d;
	*rxb = (struct ioc3_erxbuf *)new_skb->data;
	skb_reserve(new_skb, RX_OFFSET);
	*skb = new_skb;

	return 0;
}
#ifdef CONFIG_PCI_XTALK_BRIDGE
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return (addr & ~PCI64_ATTR_BAR) | attr;
}

#define ERBAR_VAL	(ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
#else
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return addr;
}

#define ERBAR_VAL	0
#endif

#define IOC3_SIZE	0x100000
static inline u32 mcr_pack(u32 pulse, u32 sample)
{
	return (pulse << 10) | (sample << 2);
}
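
/* MCR timing fields for the one-wire (iButton) bus: the pulse count sits
 * in bits 10 and up, the sample count in bits 2 and up.  For instance,
 * mcr_pack(6, 13) == (6 << 10) | (13 << 2) == 0x1834.  (Illustrative
 * arithmetic only.)
 */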
static int nic_wait(u32 __iomem *mcr)
{
	u32 m;

	/* Spin until the MCR done bit is set, then report the sampled
	 * line state.
	 */
	do {
		m = readl(mcr);
	} while (!(m & 2));

	return m & 1;
}
static int nic_reset(u32 __iomem *mcr)
{
	int presence;

	writel(mcr_pack(500, 65), mcr);
	presence = nic_wait(mcr);

	writel(mcr_pack(0, 500), mcr);
	nic_wait(mcr);

	return presence;
}

static inline int nic_read_bit(u32 __iomem *mcr)
{
	int result;

	writel(mcr_pack(6, 13), mcr);
	result = nic_wait(mcr);
	writel(mcr_pack(0, 100), mcr);
	nic_wait(mcr);

	return result;
}

static inline void nic_write_bit(u32 __iomem *mcr, int bit)
{
	if (bit)
		writel(mcr_pack(6, 110), mcr);
	else
		writel(mcr_pack(80, 30), mcr);

	nic_wait(mcr);
}
/* Read a byte from an iButton device
 */
static u32 nic_read_byte(u32 __iomem *mcr)
{
	u32 result = 0;
	int i;

	for (i = 0; i < 8; i++)
		result = (result >> 1) | (nic_read_bit(mcr) << 7);

	return result;
}
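
/* One-wire transfers are LSB first: each read shifts the accumulator
 * right and deposits the new bit in bit 7, so reading the bit stream
 * 1,0,1,1,0,0,0,0 assembles to 0x0d.  (Illustrative arithmetic.)
 */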
/* Write a byte to an iButton device
 */
static void nic_write_byte(u32 __iomem *mcr, int byte)
{
	int i, bit;

	for (i = 8; i; i--) {
		bit = byte & 1;
		byte >>= 1;

		nic_write_bit(mcr, bit);
	}
}
static u64 nic_find(u32 __iomem *mcr, int *last)
{
	int a, b, index, disc;
	u64 address = 0;

	nic_reset(mcr);
	/* Search ROM. */
	nic_write_byte(mcr, 0xf0);

	/* Algorithm from the "Book of iButton Standards". */
	for (index = 0, disc = 0; index < 64; index++) {
		a = nic_read_bit(mcr);
		b = nic_read_bit(mcr);
		if (a && b) {
			/* Nothing pulled the bus low: no device answered. */
			pr_warn("NIC search failed (not fatal).\n");
			*last = 0;
			return 0;
		}
		if (!a && !b) {
			/* Devices disagree here: resolve the discrepancy. */
			if (index == *last) {
				address |= 1UL << index;
			} else if (index > *last) {
				address &= ~(1UL << index);
				disc = index;
			} else if ((address & (1UL << index)) == 0) {
				disc = index;
			}
			nic_write_bit(mcr, address & (1UL << index));
		} else {
			if (a)
				address |= 1UL << index;
			else
				address &= ~(1UL << index);
			nic_write_bit(mcr, a);
		}
	}
	*last = disc;

	return address;
}
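
/* Each ROM address bit is read twice, true and complement, so a == b == 1
 * means no device responded while a == b == 0 marks a discrepancy.  Each
 * call walks one branch of the 64-bit address tree; *last remembers the
 * deepest discrepancy, so repeated calls with the updated *last enumerate
 * every device on the wire.  (Explanatory note paraphrasing the iButton
 * search algorithm.)
 */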
static int nic_init(u32 __iomem *mcr)
{
	const char *unknown = "unknown";
	const char *type = unknown;
	u8 crc, serial[6];
	int save = 0, i;
	u64 reg;

	reg = nic_find(mcr, &save);
	switch (reg & 0xff) {
	case 0x91:
		type = "DS1981U";
		break;
	default:
		/* Let the caller try again. */
		return -1;
	}

	nic_reset(mcr);

	/* Match ROM. */
	nic_write_byte(mcr, 0x55);
	for (i = 0; i < 8; i++)
		nic_write_byte(mcr, (reg >> (i << 3)) & 0xff);

	reg >>= 8;		/* Shift out type. */
	for (i = 0; i < 6; i++) {
		serial[i] = reg & 0xff;
		reg >>= 8;
	}
	crc = reg & 0xff;

	pr_info("Found %s NIC", type);
	pr_cont(" registration number %pM, CRC %02x", serial, crc);
	pr_cont(".\n");

	return 0;
}
/* Read the NIC (Number-In-a-Can) device used to store the MAC address on
 * SN0 / SN00 nodeboards and PCI cards.
 */
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
	u32 __iomem *mcr = &ip->all_regs->mcr;
	int tries = 2;	/* There may be some problem with the battery? */
	u8 nic[14];
	int i;

	writel(1 << 21, &ip->all_regs->gpcr_s);

	while (tries--) {
		if (!nic_init(mcr))
			break;
		udelay(500);
	}

	if (tries < 0) {
		pr_err("Failed to read MAC address\n");
		return;
	}

	/* Read Memory command plus two address bytes. */
	nic_write_byte(mcr, 0xf0);
	nic_write_byte(mcr, 0x00);
	nic_write_byte(mcr, 0x00);

	for (i = 13; i >= 0; i--)
		nic[i] = nic_read_byte(mcr);

	for (i = 2; i < 8; i++)
		ip->dev->dev_addr[i - 2] = nic[i];
}
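
/* The 14 bytes read back are the device's registration record; the six
 * MAC address octets live at nic[2]..nic[7], which is why the copy loop
 * above starts at index 2.  (Explanatory note.)
 */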
/* Ok, this is hosed by design.  It's necessary to know what machine the
 * NIC is in, in order to know how to read the NIC address.  We also have
 * to know if it's a PCI card or a NIC on the node board ...
 */
static void ioc3_get_eaddr(struct ioc3_private *ip)
{
	ioc3_get_eaddr_nic(ip);

	pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr);
}

static void __ioc3_set_mac_address(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	writel((dev->dev_addr[5] << 8) |
	       dev->dev_addr[4],
	       &ip->regs->emar_h);
	writel((dev->dev_addr[3] << 24) |
	       (dev->dev_addr[2] << 16) |
	       (dev->dev_addr[1] << 8) |
	       dev->dev_addr[0],
	       &ip->regs->emar_l);
}

static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&ip->ioc3_lock);
	__ioc3_set_mac_address(dev);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}
/* Caller must hold the ioc3_lock even for MII reads.  The lock is also
 * used to protect the transmitter side but it's low contention.
 */
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	while (readl(&regs->micr) & MICR_BUSY)
		;
	writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG,
	       &regs->micr);
	while (readl(&regs->micr) & MICR_BUSY)
		;

	return readl(&regs->midr_r) & MIDR_DATA_MASK;
}

static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	while (readl(&regs->micr) & MICR_BUSY)
		;
	writel(data, &regs->midr_w);
	writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr);
	while (readl(&regs->micr) & MICR_BUSY)
		;
}

static int ioc3_mii_init(struct ioc3_private *ip);
static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK;
	return &dev->stats;
}
static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
{
	struct ethhdr *eh = eth_hdr(skb);
	unsigned int proto;
	unsigned char *cp;
	struct iphdr *ih;
	u32 csum, ehsum;
	u16 *ew;

	/* Did hardware handle the checksum at all?  The cases we can handle
	 * are:
	 *
	 * - TCP and UDP checksums of IPv4 only.
	 * - IPv6 would be doable but we keep that for later ...
	 * - Only unfragmented packets.  Did somebody already tell you
	 *   fragmentation is evil?
	 * - don't care about packet size.  Worst case when processing a
	 *   malformed packet we'll try to access the packet at ip header +
	 *   64 bytes which is still inside the skb.  Even in the unlikely
	 *   case where the checksum is right the higher layers will still
	 *   drop the packet as appropriate.
	 */
	if (eh->h_proto != htons(ETH_P_IP))
		return;

	ih = (struct iphdr *)((char *)eh + ETH_HLEN);
	if (ip_is_fragment(ih))
		return;

	proto = ih->protocol;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return;

	/* Same as tx - compute csum of pseudo header */
	csum = hwsum +
	       (ih->tot_len - (ih->ihl << 2)) +
	       htons((u16)ih->protocol) +
	       (ih->saddr >> 16) + (ih->saddr & 0xffff) +
	       (ih->daddr >> 16) + (ih->daddr & 0xffff);

	/* Sum up ethernet dest addr, src addr and protocol */
	ew = (u16 *)eh;
	ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

	ehsum = (ehsum & 0xffff) + (ehsum >> 16);
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);

	csum += 0xffff ^ ehsum;

	/* In the next step we also subtract the 1's complement
	 * checksum of the trailing ethernet CRC.
	 */
	cp = (char *)eh + len;	/* points at trailing CRC */
	if (len & 1) {
		csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]);
		csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]);
	} else {
		csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]);
		csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]);
	}

	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);

	if (csum == 0xffff)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
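
/* The repeated "(x & 0xffff) + (x >> 16)" steps fold carry bits back into
 * the low 16 bits, as 1's complement arithmetic requires.  A minimal
 * standalone sketch of the idiom (illustrative only):
 */
#if 0
static u16 csum_fold16(u32 sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold once ... */
	sum = (sum & 0xffff) + (sum >> 16);	/* ... and the possible carry */
	return sum;
}
#endif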
static inline void ioc3_rx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sk_buff *skb, *new_skb;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	dma_addr_t d;
	u32 w0, err;

	rxr = ip->rxr;		/* Ring base */
	rx_entry = ip->rx_ci;	/* RX consume index */
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {
		err = be32_to_cpu(rxb->err);	/* It's valid ... */
		if (err & ERXBUF_GOODPKT) {
			/* Byte count includes the trailing 4 byte CRC. */
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);

			if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) {
				/* Ouch, drop packet and just recycle packet
				 * to keep the ring filled.
				 */
				dev->stats.rx_dropped++;
				new_skb = skb;
				d = rxr[rx_entry];
				goto next;
			}

			if (likely(dev->features & NETIF_F_RXCSUM))
				ioc3_tcpudp_checksum(skb,
						     w0 & ERXBUF_IPCKSUM_MASK,
						     len);

			dma_unmap_single(ip->dma_dev, rxr[rx_entry],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);

			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;	/* Poison */

			dev->stats.rx_packets++;	/* Statistics */
			dev->stats.rx_bytes += len;
		} else {
			/* The frame is invalid and the skb never
			 * reached the network layer so we can just
			 * recycle it.
			 */
			new_skb = skb;
			d = rxr[rx_entry];
			dev->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)	/* Statistics */
			dev->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			dev->stats.rx_frame_errors++;

next:
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & RX_RING_MASK;	/* Update erpir */

		/* Now go on to the next ring entry. */
		rx_entry = (rx_entry + 1) & RX_RING_MASK;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}

	writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir);
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}
static inline void ioc3_tx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;
	unsigned long packets, bytes;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = readl(&regs->etcir);

	tx_entry = (etcir >> 7) & TX_RING_MASK;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_consume_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & TX_RING_MASK;	/* Next */

		etcir = readl(&regs->etcir);	/* More pkts sent? */
		tx_entry = (etcir >> 7) & TX_RING_MASK;
	}

	dev->stats.tx_packets += packets;
	dev->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
		netif_wake_queue(dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}
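
/* ETCIR/ETPIR hold byte offsets into the TX ring, and each struct
 * ioc3_etxd is 128 bytes (as the "16kb = 128 bufs" allocation comment
 * below implies), hence the ">> 7" above and the "<< 7" when arming the
 * producer register.  (Explanatory note.)
 */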
/* Deal with fatal IOC3 errors.  This condition might be caused by
 * hardware or software problems, so we should try to recover
 * more gracefully if this ever happens.  In theory we might be flooded
 * with such error interrupts if something really goes wrong, so we might
 * also consider taking the interface down.
 */
static void ioc3_error(struct net_device *dev, u32 eisr)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock(&ip->ioc3_lock);

	if (eisr & EISR_RXOFLO)
		net_err_ratelimited("%s: RX overflow.\n", dev->name);
	if (eisr & EISR_RXBUFOFLO)
		net_err_ratelimited("%s: RX buffer overflow.\n", dev->name);
	if (eisr & EISR_RXMEMERR)
		net_err_ratelimited("%s: RX PCI error.\n", dev->name);
	if (eisr & EISR_RXPARERR)
		net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name);
	if (eisr & EISR_TXBUFUFLO)
		net_err_ratelimited("%s: TX buffer underflow.\n", dev->name);
	if (eisr & EISR_TXMEMERR)
		net_err_ratelimited("%s: TX PCI error.\n", dev->name);

	ioc3_stop(ip);
	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		spin_unlock(&ip->ioc3_lock);
		return;
	}
	ioc3_start(ip);

	netif_wake_queue(dev);

	spin_unlock(&ip->ioc3_lock);
}
/* The interrupt handler does all of the Rx thread work and cleans up
 * after the Tx thread.
 */
static irqreturn_t ioc3_interrupt(int irq, void *dev_id)
{
	struct ioc3_private *ip = netdev_priv(dev_id);
	struct ioc3_ethregs *regs = ip->regs;
	u32 eisr;

	eisr = readl(&regs->eisr);
	writel(eisr, &regs->eisr);
	readl(&regs->eisr);			/* Flush */

	if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
		    EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
		ioc3_error(dev_id, eisr);
	if (eisr & EISR_RXTIMERINT)
		ioc3_rx(dev_id);
	if (eisr & EISR_TXEXPLICIT)
		ioc3_tx(dev_id);

	return IRQ_HANDLED;
}
static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;

	spin_lock_irq(&ip->ioc3_lock);

	if (ip->mii.full_duplex) {
		writel(ETCSR_FD, &regs->etcsr);
		ip->emcr |= EMCR_DUPLEX;
	} else {
		writel(ETCSR_HD, &regs->etcsr);
		ip->emcr &= ~EMCR_DUPLEX;
	}
	writel(ip->emcr, &regs->emcr);

	spin_unlock_irq(&ip->ioc3_lock);
}
static void ioc3_timer(struct timer_list *t)
{
	struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);

	/* Print the link status if it has changed */
	mii_check_media(&ip->mii, 1, 0);
	ioc3_setup_duplex(ip);

	ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */
	add_timer(&ip->ioc3_timer);
}
/* Try to find a PHY.  There is no apparent relation between the MII addresses
 * in the SGI documentation and what we find in reality, so we simply probe
 * for the PHY.  It seems IOC3 PHYs usually live on address 31.  One of my
 * onboard IOC3s has the special oddity that probing doesn't seem to find it
 * yet the interface seems to work fine, so if probing fails we simply
 * default to PHY 31 for now instead of bailing out.
 */
static int ioc3_mii_init(struct ioc3_private *ip)
{
	int ioc3_phy_workaround = 1;
	int i, found = 0, res = 0;
	u16 word;

	for (i = 0; i < 32; i++) {
		word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000) {
			found = 1;
			break;			/* Found a PHY */
		}
	}

	if (!found) {
		if (ioc3_phy_workaround) {
			i = 31;
		} else {
			ip->mii.phy_id = -1;
			return -ENODEV;
		}
	}

	ip->mii.phy_id = i;

	return res;
}
static void ioc3_mii_start(struct ioc3_private *ip)
{
	ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10;  /* 1.2 sec. */
	add_timer(&ip->ioc3_timer);
}
static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
{
	struct ioc3_etxd *desc;
	u32 cmd, bufcnt, len;

	desc = &ip->txr[entry];
	cmd = be32_to_cpu(desc->cmd);
	bufcnt = be32_to_cpu(desc->bufcnt);
	if (cmd & ETXD_B1V) {
		len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT;
		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
				 len, DMA_TO_DEVICE);
	}
	if (cmd & ETXD_B2V) {
		len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT;
		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
				 len, DMA_TO_DEVICE);
	}
}
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		skb = ip->tx_skbs[i];
		if (skb) {
			ioc3_tx_unmap(ip, i);
			ip->tx_skbs[i] = NULL;
			dev_kfree_skb_any(skb);
		}
	}
	ip->tx_pi = 0;
	ip->tx_ci = 0;
}
static void ioc3_free_rx_bufs(struct ioc3_private *ip)
{
	int rx_entry, n_entry;
	struct sk_buff *skb;

	n_entry = ip->rx_ci;
	rx_entry = ip->rx_pi;

	while (n_entry != rx_entry) {
		skb = ip->rx_skbs[n_entry];
		if (skb) {
			dma_unmap_single(ip->dma_dev,
					 be64_to_cpu(ip->rxr[n_entry]),
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
		n_entry = (n_entry + 1) & RX_RING_MASK;
	}
}
static int ioc3_alloc_rx_bufs(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_erxbuf *rxb;
	dma_addr_t d;
	int i;

	/* Now the rx buffers.  The RX ring may be larger but
	 * we only allocate RX_BUFFS buffers for now.  Need to tune
	 * this for performance and memory later.
	 */
	for (i = 0; i < RX_BUFFS; i++) {
		if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d))
			return -ENOMEM;

		rxb->w0 = 0;	/* Clear valid flag */
		ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
	}
	ip->rx_ci = 0;
	ip->rx_pi = RX_BUFFS;

	return 0;
}
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;
	u32 *ssram0 = &ip->ssram[0x0000];
	u32 *ssram1 = &ip->ssram[0x4000];
	u32 pattern = 0x5555;

	/* Assume the larger size SSRAM and enable parity checking */
	writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr);
	readl(&regs->emcr);		/* Flush */

	writel(pattern, ssram0);
	writel(~pattern & IOC3_SSRAM_DM, ssram1);

	if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern ||
	    (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		/* The two locations alias: set ssram size to 64 KB */
		ip->emcr |= EMCR_RAMPAR;
		writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr);
	} else {
		ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR;
	}
}
static void ioc3_init(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	del_timer_sync(&ip->ioc3_timer);	/* Kill if running	*/

	writel(EMCR_RST, &regs->emcr);		/* Reset		*/
	readl(&regs->emcr);			/* Flush WB		*/
	udelay(4);				/* Give it time ...	*/
	writel(0, &regs->emcr);
	readl(&regs->emcr);

	writel(ERBAR_VAL, &regs->erbar);
	readl(&regs->etcdc);			/* Clear on read	*/
	writel(15, &regs->ercsr);		/* RX low watermark	*/
	writel(0, &regs->ertr);			/* Interrupt immediately */
	__ioc3_set_mac_address(dev);
	writel(ip->ehar_h, &regs->ehar_h);
	writel(ip->ehar_l, &regs->ehar_l);
	writel(42, &regs->ersr);		/* XXX should be random	*/
}
static void ioc3_start(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;
	unsigned long ring;

	/* Now the rx ring base, consume & produce registers. */
	ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
	writel(ring >> 32, &regs->erbr_h);
	writel(ring & 0xffffffff, &regs->erbr_l);
	writel(ip->rx_ci << 3, &regs->ercir);
	writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);

	ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);

	ip->txqlen = 0;			/* nothing queued */

	/* Now the tx ring base, consume & produce registers. */
	writel(ring >> 32, &regs->etbr_h);
	writel(ring & 0xffffffff, &regs->etbr_l);
	writel(ip->tx_pi << 7, &regs->etpir);
	writel(ip->tx_ci << 7, &regs->etcir);
	readl(&regs->etcir);		/* Flush */

	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
		    EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
	writel(ip->emcr, &regs->emcr);
	writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	       EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	       EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier);
	readl(&regs->eier);
}
static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;

	writel(0, &regs->emcr);		/* Shutup */
	writel(0, &regs->eier);		/* Disable interrupts */
	readl(&regs->eier);		/* Flush */
}
static int ioc3_open(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
		netdev_err(dev, "Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	ip->ehar_h = 0;
	ip->ehar_l = 0;

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		return -ENOMEM;
	}
	ioc3_start(ip);
	ioc3_mii_start(ip);

	netif_start_queue(dev);
	return 0;
}
static int ioc3_close(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	del_timer_sync(&ip->ioc3_timer);

	netif_stop_queue(dev);

	ioc3_stop(ip);
	free_irq(dev->irq, dev);

	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	return 0;
}
/* MENET cards have four IOC3 chips, which are attached to two sets of
 * PCI slot resources each: the primary connections are on slots
 * 0..3 and the secondaries are on 4..7
 *
 * All four ethernets are brought out to connectors; six serial ports
 * (a pair from each of the first three IOC3s) are brought out to
 * MiniDINs; all other subdevices are left swinging in the wind, leave
 * them disabled.
 */
static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
{
	struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
	int ret = 0;

	if (dev) {
		if (dev->vendor == PCI_VENDOR_ID_SGI &&
		    dev->device == PCI_DEVICE_ID_SGI_IOC3)
			ret = 1;
		pci_dev_put(dev);
	}

	return ret;
}

static int ioc3_is_menet(struct pci_dev *pdev)
{
	return !pdev->bus->parent &&
	       ioc3_adjacent_is_ioc3(pdev, 0) &&
	       ioc3_adjacent_is_ioc3(pdev, 1) &&
	       ioc3_adjacent_is_ioc3(pdev, 2);
}
#ifdef CONFIG_SERIAL_8250
/* Note about serial ports and consoles:
 * For console output, everyone uses the IOC3 UARTA (offset 0x178)
 * connected to the master node (look in ip27_setup_console() and
 * ip27prom_console_write()).
 *
 * For serial (/dev/ttyS0 etc), we cannot have hardcoded serial port
 * addresses on a partitioned machine.  Since we currently use the ioc3
 * serial ports, we use dynamic serial port discovery that the serial.c
 * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
 * boards in pci_boards[]).  Unfortunately, UARTA's pio address is greater
 * than UARTB's, although UARTA on o200s has traditionally been known as
 * port 0.  So, we just use one serial port from each ioc3 (since the
 * serial driver adds addresses to get to higher ports).
 *
 * The first one to do a register_console becomes the preferred console
 * (if there is no kernel command line console= directive).  /dev/console
 * (ie 5, 1) is then "aliased" into the device number returned by the
 * "device" routine referred to in this console structure
 * (ip27prom_console_dev).
 *
 * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
 * around ioc3 oddities in this respect.
 *
 * The IOC3 serials use a 22MHz clock rate with an additional divider which
 * can be programmed in the SCR register if the DLAB bit is set.
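 *
 * With the divider programmed to 6 (COSMISC_CONSTANT below), for example,
 * the clock handed to the 8250 core is (22000000 << 1) / 6 = 7333333 Hz;
 * the standard 8250 divide-by-16 then caps the usable rate around
 * 458 kbaud.  (Illustrative arithmetic, not from the original comment.)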
 *
 * Register to interrupt zero because we share the interrupt with
 * the serial driver which we don't properly support yet.
 *
 * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
 * mapped.
 */
static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6

	struct uart_8250_port port = {
		.port = {
			.irq		= 0,
			.flags		= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
			.iotype		= UPIO_MEM,
			.regshift	= 0,
			.uartclk	= (22000000 << 1) / COSMISC_CONSTANT,

			.membase	= (unsigned char __iomem *)uart,
			.mapbase	= (unsigned long)uart,
		}
	};
	unsigned char lcr;

	lcr = readb(&uart->iu_lcr);
	writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
	writeb(COSMISC_CONSTANT, &uart->iu_scr);
	writeb(lcr, &uart->iu_lcr);
	readb(&uart->iu_lcr);
	serial8250_register_8250_port(&port);
}
static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
	u32 sio_iec;

	/* We need to recognize and treat the fourth MENET serial as it
	 * does not have a SuperIO chip attached to it, therefore attempting
	 * to access it will result in bus errors.  We call something a
	 * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
	 * in it.  This is paranoid but we want to avoid blowing up on a
	 * shoehorn PCI box that happens to have 4 IOC3 cards in it so it's
	 * not paranoid enough ...
	 */
	if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
		return;

	/* Switch IOC3 to PIO mode.  It probably already was but let's be
	 * sure.
	 */
	writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s);
	readl(&ioc3->gpcr_s);
	writel(0, &ioc3->gppr[6]);
	readl(&ioc3->gppr[6]);
	writel(0, &ioc3->gppr[7]);
	readl(&ioc3->gppr[7]);
	writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr);
	readl(&ioc3->port_a.sscr);
	writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr);
	readl(&ioc3->port_b.sscr);
	/* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
	sio_iec = readl(&ioc3->sio_iec);
	sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
		     SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
		     SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
		     SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
	sio_iec |= SIO_IR_SA_INT;
	sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
		     SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
		     SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
		     SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
	sio_iec |= SIO_IR_SB_INT;
	writel(sio_iec, &ioc3->sio_iec);
	writel(0, &ioc3->port_a.sscr);
	writel(0, &ioc3->port_b.sscr);

	ioc3_8250_register(&ioc3->sregs.uarta);
	ioc3_8250_register(&ioc3->sregs.uartb);
}
#endif /* CONFIG_SERIAL_8250 */
static const struct net_device_ops ioc3_netdev_ops = {
	.ndo_open		= ioc3_open,
	.ndo_stop		= ioc3_close,
	.ndo_start_xmit		= ioc3_start_xmit,
	.ndo_tx_timeout		= ioc3_timeout,
	.ndo_get_stats		= ioc3_get_stats,
	.ndo_set_rx_mode	= ioc3_set_multicast_list,
	.ndo_do_ioctl		= ioc3_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ioc3_set_mac_address,
};
static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int sw_physid1, sw_physid2;
	struct net_device *dev = NULL;
	struct ioc3_private *ip;
	struct ioc3 *ioc3;
	unsigned long ioc3_base, ioc3_size;
	u32 vendor, model, rev;
	int err;

	/* Configure DMA attributes. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		pr_err("%s: No usable DMA configuration, aborting.\n",
		       pci_name(pdev));
		goto out;
	}

	if (pci_enable_device(pdev))
		return -ENODEV;

	dev = alloc_etherdev(sizeof(struct ioc3_private));
	if (!dev) {
		err = -ENOMEM;
		goto out_disable;
	}

	err = pci_request_regions(pdev, "ioc3");
	if (err)
		goto out_free;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ip = netdev_priv(dev);
	ip->dev = dev;
	ip->dma_dev = &pdev->dev;

	dev->irq = pdev->irq;

	ioc3_base = pci_resource_start(pdev, 0);
	ioc3_size = pci_resource_len(pdev, 0);
	ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
	if (!ioc3) {
		pr_err("ioc3eth(%s): ioremap failed, goodbye.\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto out_res;
	}
	ip->regs = &ioc3->eth;
	ip->ssram = ioc3->ssram;
	ip->all_regs = ioc3;

#ifdef CONFIG_SERIAL_8250
	ioc3_serial_probe(pdev, ioc3);
#endif

	spin_lock_init(&ip->ioc3_lock);
	timer_setup(&ip->ioc3_timer, ioc3_timer, 0);

	ioc3_stop(ip);

	/* Allocate rx ring.  4kb = 512 entries, must be 4kb aligned */
	ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
				     GFP_KERNEL);
	if (!ip->rxr) {
		pr_err("ioc3-eth: rx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}

	/* Allocate tx rings.  16kb = 128 bufs, must be 16kb aligned */
	ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
					 &ip->txr_dma, GFP_KERNEL);
	if (!ip->tx_ring) {
		pr_err("ioc3-eth: tx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}
	ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
	ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);

	ioc3_init(dev);

	ip->pdev = pdev;

	ip->mii.phy_id_mask = 0x1f;
	ip->mii.reg_num_mask = 0x1f;
	ip->mii.dev = dev;
	ip->mii.mdio_read = ioc3_mdio_read;
	ip->mii.mdio_write = ioc3_mdio_write;

	ioc3_mii_init(ip);

	if (ip->mii.phy_id == -1) {
		pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_stop;
	}

	ioc3_mii_start(ip);
	ioc3_ssram_disc(ip);
	ioc3_get_eaddr(ip);

	/* The IOC3-specific entries in the device structure. */
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &ioc3_netdev_ops;
	dev->ethtool_ops = &ioc3_ethtool_ops;
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;

	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);

	err = register_netdev(dev);
	if (err)
		goto out_stop;

	mii_check_media(&ip->mii, 1, 1);
	ioc3_setup_duplex(ip);

	vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
	model = (sw_physid2 >> 4) & 0x3f;
	rev = sw_physid2 & 0xf;
	netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n",
		    ip->mii.phy_id, vendor, model, rev);
	netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n",
		    ip->emcr & EMCR_BUFSIZ ? 128 : 64);

	return 0;

out_stop:
	del_timer_sync(&ip->ioc3_timer);
	if (ip->rxr)
		dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
				  ip->rxr_dma);
	if (ip->tx_ring)
		dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
				  ip->txr_dma);
out_res:
	pci_release_regions(pdev);
out_free:
	free_netdev(dev);
out_disable:
	/* We should call pci_disable_device(pdev); here if the IOC3 wasn't
	 * such a weird device ...
	 */
out:
	return err;
}
static void ioc3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ioc3_private *ip = netdev_priv(dev);

	dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
	dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);

	unregister_netdev(dev);
	del_timer_sync(&ip->ioc3_timer);

	iounmap(ip->all_regs);
	pci_release_regions(pdev);
	free_netdev(dev);
	/* We should call pci_disable_device(pdev); here if the IOC3 wasn't
	 * such a weird device ...
	 */
}

static const struct pci_device_id ioc3_pci_tbl[] = {
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);

static struct pci_driver ioc3_driver = {
	.name		= IOC3_NAME,
	.id_table	= ioc3_pci_tbl,
	.probe		= ioc3_probe,
	.remove		= ioc3_remove_one,
};
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_etxd *desc;
	unsigned long data;
	unsigned int len;
	int produce;
	u32 w0 = 0;

	/* IOC3 has a fairly simple minded checksumming hardware which simply
	 * adds up the 1's complement checksum for the entire packet and
	 * inserts it at an offset which can be specified in the descriptor
	 * into the transmit packet.  This means we have to compensate for the
	 * MAC header which should not be summed and the TCP/UDP pseudo headers
	 * manually.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ih = ip_hdr(skb);
		const int proto = ih->protocol;
		unsigned int csoff;
		u32 csum, ehsum;
		u16 *eh;

		/* The MAC header.  skb->mac seems the logical approach
		 * to find the MAC header - except it's a NULL pointer ...
		 */
		eh = (u16 *)skb->data;

		/* Sum up dest addr, src addr and protocol */
		ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];

		/* Skip IP header; its sum is always zero and was
		 * already filled in by ip_output.c
		 */
		csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
					  ih->tot_len - (ih->ihl << 2),
					  proto, csum_fold(ehsum));

		csum = (csum & 0xffff) + (csum >> 16);	/* Fold again */
		csum = (csum & 0xffff) + (csum >> 16);

		csoff = ETH_HLEN + (ih->ihl << 2);
		if (proto == IPPROTO_UDP) {
			csoff += offsetof(struct udphdr, check);
			udp_hdr(skb)->check = csum;
		}
		if (proto == IPPROTO_TCP) {
			csoff += offsetof(struct tcphdr, check);
			tcp_hdr(skb)->check = csum;
		}

		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
	}

	spin_lock_irq(&ip->ioc3_lock);

	data = (unsigned long)skb->data;
	len = skb->len;

	produce = ip->tx_pi;
	desc = &ip->txr[produce];

	if (len <= 104) {
		/* Short packet, let's copy it directly into the ring. */
		skb_copy_from_linear_data(skb, desc->data, skb->len);
		if (len < ETH_ZLEN) {
			/* Very short packet, pad with zeros at the end. */
			memset(desc->data + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
		desc->bufcnt = cpu_to_be32(len);
	} else if ((data ^ (data + len - 1)) & 0x4000) {
		/* Buffer straddles a 16kb boundary; split it in two. */
		unsigned long b2 = (data | 0x3fffUL) + 1UL;
		unsigned long s1 = b2 - data;
		unsigned long s2 = data + len - b2;
		dma_addr_t d1, d2;

		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
					ETXD_B1V | ETXD_B2V | w0);
		desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
					   (s2 << ETXD_B2CNT_SHIFT));
		d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d1))
			goto drop_packet;
		d2 = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d2)) {
			dma_unmap_single(ip->dma_dev, d1, s1, DMA_TO_DEVICE);
			goto drop_packet;
		}
		desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF));
		desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF));
	} else {
		dma_addr_t d;

		/* Normal sized packet that doesn't cross a page boundary. */
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
		desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
		d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d))
			goto drop_packet;
		desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
	}

	mb(); /* make sure all descriptor changes are visible */

	ip->tx_skbs[produce] = skb;			/* Remember skb */
	produce = (produce + 1) & TX_RING_MASK;
	ip->tx_pi = produce;
	writel(produce << 7, &ip->regs->etpir);		/* Fire ... */

	ip->txqlen++;

	if (ip->txqlen >= (TX_RING_ENTRIES - 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;

drop_packet:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;
}
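
/* Example of the 16kb boundary split above (illustrative): for
 * data == 0x3f00 and len == 0x200, bit 14 differs between the first and
 * last byte, so b2 == 0x4000, s1 == 0x100 and s2 == 0x100 - the two DMA
 * fragments cover [0x3f00, 0x4000) and [0x4000, 0x4100).
 */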
static void ioc3_timeout(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&ip->ioc3_lock);

	ioc3_stop(ip);
	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		spin_unlock_irq(&ip->ioc3_lock);
		return;
	}
	ioc3_start(ip);
	ioc3_mii_init(ip);
	ioc3_mii_start(ip);

	spin_unlock_irq(&ip->ioc3_lock);

	netif_wake_queue(dev);
}
/* Given a multicast ethernet address, this routine calculates the
 * address's bit index in the logical address filter mask
 */
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
	unsigned int temp = 0;
	int bits;
	u32 crc;

	crc = ether_crc_le(ETH_ALEN, addr);

	crc &= 0x3f;	/* bit reverse lowest 6 bits for hash index */
	for (bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= (crc & 0x1);
		crc >>= 1;
	}

	return temp;
}
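
/* Example (illustrative): for low six CRC bits 0b101100 the loop above
 * emits them in reverse order, yielding the hash index 0b001101, i.e.
 * bit 0 of the CRC ends up as bit 5 of the index.
 */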
static void ioc3_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct ioc3_private *ip = netdev_priv(dev);

	strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
	strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}

static int ioc3_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock_irq(&ip->ioc3_lock);
	mii_ethtool_get_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}

static int ioc3_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static int ioc3_nway_reset(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_nway_restart(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static u32 ioc3_get_link(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	u32 rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_link_ok(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static const struct ethtool_ops ioc3_ethtool_ops = {
	.get_drvinfo		= ioc3_get_drvinfo,
	.nway_reset		= ioc3_nway_reset,
	.get_link		= ioc3_get_link,
	.get_link_ksettings	= ioc3_get_link_ksettings,
	.set_link_ksettings	= ioc3_set_link_ksettings,
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}
static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;
	struct netdev_hw_addr *ha;
	u64 ehar = 0;

	spin_lock_irq(&ip->ioc3_lock);

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		ip->emcr |= EMCR_PROMISC;
		writel(ip->emcr, &regs->emcr);
		readl(&regs->emcr);
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		writel(ip->emcr, &regs->emcr);		/* Clear promiscuous. */
		readl(&regs->emcr);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 64)) {
			/* Too many for hashing to make sense or we want all
			 * multicast packets anyway, so skip computing all the
			 * hashes and just accept all packets.
			 */
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			netdev_for_each_mc_addr(ha, dev) {
				ehar |= (1UL << ioc3_hash(ha->addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		writel(ip->ehar_h, &regs->ehar_h);
		writel(ip->ehar_l, &regs->ehar_l);
	}

	spin_unlock_irq(&ip->ioc3_lock);
}
module_pci_driver(ioc3_driver);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");