/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"
static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
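
/* frames shorter than copybreak are copied into a fresh, ip-aligned skb
 * in the receive path, so the (large) mapped DMA buffer can be rearmed
 * in place; larger frames are passed up whole and their ring slot is
 * refilled with a newly allocated skb */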

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}

/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[1] + off);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[1] + off);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[2] + off);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off);
}
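
/* the three shared register windows map, in order: global dma
 * configuration (enet_dma_*), per-channel configuration (enet_dmac_*)
 * and per-channel state ram (enet_dmas_*) */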

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;
			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}
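
/* descriptor ownership protocol: the cpu fills a descriptor, then hands
 * it to the hardware by setting DMADESC_OWNER_MASK (after a wmb() so the
 * buffer address and length are visible first); the BUFALLOC write
 * credits the dma engine with one more available rx buffer */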

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = (struct net_device *)data;
	priv = netdev_priv(dev);

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb_ip_align(dev, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
				 ENETDMAC_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}

/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int tx_work_done, rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR_REG(priv->rx_chan));
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR_REG(priv->tx_chan));

	/* reclaim sent skb */
	tx_work_done = bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget || tx_work_done > 0) {
		/* rx/tx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete(napi);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK_REG(priv->tx_chan));

	return rx_work_done;
}
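
/* interrupt/napi handshake: the dma isr below masks PKTDONE and
 * schedules napi; the poll function above acks the raw interrupt, does
 * bounded rx/tx work, and only unmasks PKTDONE again once a poll round
 * completes under budget */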

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		data = skb_put(skb, needed);
		memset(data, 0, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
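
/* the two wmb() calls in the xmit path are intentional: the first
 * publishes the descriptor address/length before ownership is handed
 * over in len_stat, the second makes the ownership change visible
 * before the channel is kicked */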

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * our own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
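
/* note the asymmetry: rx flow control (honouring received pause frames)
 * is a mac-level switch, while tx pause generation is armed per rx dma
 * channel and triggered by the FLOWCL/FLOWCH ring occupancy thresholds
 * programmed at open time */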

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");
		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mii_bus->id, priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
			       GFP_KERNEL | __GFP_ZERO);
	if (!p) {
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
			       GFP_KERNEL | __GFP_ZERO);
	if (!p) {
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG(priv->rx_chan));
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR_REG(priv->rx_chan));
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan));

	limit = 1000;
	do {
		u32 val;

		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan));
		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),	\
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),	\
		     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	\
	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, bcm_enet_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return genphy_restart_aneg(priv->phydev);
	}

	return -EOPNOTSUPP;
}

static int bcm_enet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	cmd->maxrxpkt = 0;
	cmd->maxtxpkt = 0;

	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_gset(priv->phydev, cmd);
	} else {
		cmd->autoneg = 0;
		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
					    ? SPEED_100 : SPEED_10));
		cmd->duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd->supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		cmd->advertising = 0;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
	}
	return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	} else {
		if (cmd->autoneg ||
		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
		    cmd->port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats      = bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_settings		= bcm_enet_get_settings,
	.set_settings		= bcm_enet_set_settings,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_mii_ioctl(priv->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * calculate actual hardware mtu
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	int actual_mtu;

	actual_mtu = mtu;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);
	return 0;
}
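
/* worked example, assuming the default BCMENET_DMA_MAXBURST of 16
 * (dwords) from bcm63xx_enet.h: an mtu of 1500 gives hw_mtu = 1500 +
 * VLAN_ETH_HLEN (18) = 1518, and rx_skb_size = ALIGN(1518 +
 * ETH_FCS_LEN (4), 16 * 4) = 1536 bytes per rx buffer */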

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	if (netif_running(dev))
		return -EBUSY;

	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
	if (ret)
		return ret;

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_do_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_enet_netpoll,
#endif
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
	struct mii_bus *bus;
	const char *clk_name;
	int i, ret;

	/* stop if shared driver failed, assume driver->probe will be
	 * called in the same order we register devices (correct ?) */
	if (!bcm_enet_shared_base[0])
		return -ENODEV;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;

	ret = compute_hw_mtu(priv, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
	if (priv->base == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	dev->irq = priv->irq = res_irq->start;
	priv->irq_rx = res_irq_rx->start;
	priv->irq_tx = res_irq_tx->start;
	priv->mac_id = pdev->id;

	/* get rx & tx dma channel id for this mac */
	if (priv->mac_id == 0) {
		priv->rx_chan = 0;
		priv->tx_chan = 1;
		clk_name = "enet0";
	} else {
		priv->rx_chan = 2;
		priv->tx_chan = 3;
		clk_name = "enet1";
	}

	priv->mac_clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	clk_prepare_enable(priv->mac_clk);

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = pdev->dev.platform_data;
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
	}

	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_put_clk_mac;
		}
		clk_prepare_enable(priv->phy_clk);
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {
		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation returns 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
					GFP_KERNEL);
		if (!bus->irq) {
			ret = -ENOMEM;
			goto out_free_mdio;
		}

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;
		else
			bus->irq[priv->phy_id] = PHY_POLL;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {
		/* run platform code to initialize PHY device */
		if (pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	if (priv->phy_clk) {
		clk_disable_unprepare(priv->phy_clk);
		clk_put(priv->phy_clk);
	}

out_put_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
	clk_put(priv->mac_clk);

out:
	free_netdev(dev);
	return ret;
}

/*
 * exit func, stops hardware and unregisters netdevice
 */
static int bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = pdev->dev.platform_data;
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	if (priv->phy_clk) {
		clk_disable_unprepare(priv->phy_clk);
		clk_put(priv->phy_clk);
	}
	clk_disable_unprepare(priv->mac_clk);
	clk_put(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner  = THIS_MODULE,
	},
};

/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}
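
/* ports numbered from ENETSW_RGMII_PORT0 upwards are the switch's rgmii
 * ports; phys behind them are necessarily external, which is what the
 * mdio helpers further down need to know */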

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(unsigned long data)
{
	struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					    MII_STAT1000);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
		if (lpa2 & LPA_1000FULL)
			duplex = 1;

		if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
			speed = 1000;
		else {
			if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
				speed = 100;
			else
				speed = 10;
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  IRQF_DISABLED, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
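
	/* one enable bit per port: 0x1ff covers the 8 external ports plus
	 * the cpu/mii port (assumption from the register layout); 9728
	 * bytes matches the largest frame bucket in the switch mib
	 * counters */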

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG(priv->rx_chan));
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST_REG(priv->tx_chan));

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG_REG(priv->rx_chan));

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR_REG(priv->rx_chan));
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK_REG(priv->tx_chan));

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	init_timer(&priv->swphy_poll);
	priv->swphy_poll.function = swphy_poll_timer;
	priv->swphy_poll.data = (unsigned long)priv;
	priv->swphy_poll.expires = jiffies;
	add_timer(&priv->swphy_poll);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}
/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}
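
/* mii ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) are handled by the
 * generic mii layer through a mii_if_info built on the stack.
 */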
static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}
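
/* only open/stop/ioctl are switch specific; xmit and mtu handling is
 * shared with the plain mac flavour of the driver.
 */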
static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_do_ioctl		= bcm_enetsw_ioctl,
};
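
/* ethtool stats descriptors: entries with a mib register of -1 come
 * from netdev->stats, all others are read from the switch MIB
 * counters.
 */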
static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
};
#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
	drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
}
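
/* refresh the mib counter cache from the switch registers, then export
 * both device stats and mib stats; a 64-bit mib value spans two
 * consecutive 32-bit registers, low word first.
 */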
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}
static void bcm_enetsw_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}
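
/* ring sizes can only change while the device is down; if it is
 * running, stop it, apply the new sizes and bring it back up.
 */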
static int bcm_enetsw_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}

	return 0;
}
static struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};
/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	/* stop if shared driver failed, assume driver->probe will be
	 * called in the same order we register devices (correct ?)
	 */
	if (!bcm_enet_shared_base[0])
		return -ENODEV;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;

	pd = pdev->dev.platform_data;
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
	}

	ret = compute_hw_mtu(priv, dev->mtu);
	if (ret)
		goto out;

	if (!request_mem_region(res_mem->start, resource_size(res_mem),
				"bcm63xx_enetsw")) {
		ret = -EBUSY;
		goto out;
	}

	priv->base = ioremap(res_mem->start, resource_size(res_mem));
	if (priv->base == NULL) {
		ret = -ENOMEM;
		goto out_release_mem;
	}

	priv->mac_clk = clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out_unmap;
	}
	clk_enable(priv->mac_clk);

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_put_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_put_clk:
	clk_put(priv->mac_clk);

out_unmap:
	iounmap(priv->base);

out_release_mem:
	release_mem_region(res_mem->start, resource_size(res_mem));
out:
	free_netdev(dev);
	return ret;
}
/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}
struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
		.owner	= THIS_MODULE,
	},
};
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		p[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}
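
/* nothing to undo here: the shared mappings are devm-managed and are
 * released with the device.
 */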
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	return 0;
}
2839 /* this "shared" driver is needed because both macs share a single
2842 struct platform_driver bcm63xx_enet_shared_driver = {
2843 .probe = bcm_enet_shared_probe,
2844 .remove = bcm_enet_shared_remove,
2846 .name = "bcm63xx_enet_shared",
2847 .owner = THIS_MODULE,
/* entry point: the shared driver must be registered first, since both
 * mac drivers bail out of probe when bcm_enet_shared_base is unset
 */
static int __init bcm_enet_init(void)
{
	int ret;

	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&bcm63xx_enet_driver);
	if (ret) {
		platform_driver_unregister(&bcm63xx_enet_shared_driver);
		return ret;
	}

	ret = platform_driver_register(&bcm63xx_enetsw_driver);
	if (ret) {
		platform_driver_unregister(&bcm63xx_enet_driver);
		platform_driver_unregister(&bcm63xx_enet_shared_driver);
	}

	return ret;
}
static void __exit bcm_enet_exit(void)
{
	platform_driver_unregister(&bcm63xx_enet_driver);
	platform_driver_unregister(&bcm63xx_enetsw_driver);
	platform_driver_unregister(&bcm63xx_enet_shared_driver);
}
module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");