/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

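/* rx frames shorter than copybreak bytes are copied into a freshly
 * allocated skb, so that the original (full sized) rx buffer can be
 * handed straight back to the dma engine; see the small-packet path
 * in bcm_enet_receive_queue() */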
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];
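/* the three blocks map, in order, to the global dma configuration
 * registers, the per-channel configuration registers and the
 * per-channel state ram, matching the enet_dma_/enet_dmac_/enet_dmas_
 * accessors below */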

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                               u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
                                 u32 val, u32 off)
{
        bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
                                 u16 val, u32 off)
{
        bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
                                 u8 val, u32 off)
{
        bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base[1] + off);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base[1] + off);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off)
{
        return bcm_readl(bcm_enet_shared_base[2] + off);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
                                       u32 val, u32 off)
{
        bcm_writel(val, bcm_enet_shared_base[2] + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
        int limit;

        /* make sure mii interrupt status is cleared */
        enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

        enet_writel(priv, data, ENET_MIIDATA_REG);
        wmb();

        /* busy wait on mii interrupt bit, with timeout */
        limit = 1000;
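        /* 1000 iterations of udelay(1) gives a ceiling of roughly 1ms,
         * well above the ~25us average noted above */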
        do {
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
        } while (limit-- > 0);

        return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                              int regnum)
{
        u32 tmp, val;

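        /* pack what looks like the standard clause-22 mdio frame
         * fields into the MIIDATA register: register address,
         * turnaround pattern (binary 10), phy address and the read
         * opcode */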
        tmp = regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_READ_MASK;

        if (do_mdio_op(priv, tmp))
                return -1;

        val = enet_readl(priv, ENET_MIIDATA_REG);
        val &= 0xffff;
        return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                               int regnum, u16 value)
{
        u32 tmp;

        tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
        tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
        tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
        tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
        tmp |= ENET_MIIDATA_OP_WRITE_MASK;

        (void)do_mdio_op(priv, tmp);
        return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                                     int regnum)
{
        return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                                      int regnum, u16 value)
{
        return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                                  int regnum)
{
        return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                                    int regnum, int value)
{
        bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        while (priv->rx_desc_count < priv->rx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                dma_addr_t p;
                int desc_idx;
                u32 len_stat;

                desc_idx = priv->rx_dirty_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                if (!priv->rx_skb[desc_idx]) {
                        skb = netdev_alloc_skb(dev, priv->rx_skb_size);
                        if (!skb)
                                break;
                        priv->rx_skb[desc_idx] = skb;
                        p = dma_map_single(&priv->pdev->dev, skb->data,
                                           priv->rx_skb_size,
                                           DMA_FROM_DEVICE);
                        desc->address = p;
                }

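                /* hand the descriptor back to hardware: OWNER marks it
                 * hw-owned, and WRAP on the last descriptor makes the
                 * dma engine loop back to the start of the ring */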
                len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
                len_stat |= DMADESC_OWNER_MASK;
                if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
                        len_stat |= DMADESC_WRAP_MASK;
                        priv->rx_dirty_desc = 0;
                } else {
                        priv->rx_dirty_desc++;
                }
                wmb();
                desc->len_stat = len_stat;

                priv->rx_desc_count++;

                /* tell dma engine we allocated one buffer */
                enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
        }

        /* If rx ring is still empty, set a timer to try allocating
         * again at a later time. */
        if (priv->rx_desc_count == 0 && netif_running(dev)) {
                dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
                priv->rx_timeout.expires = jiffies + HZ;
                add_timer(&priv->rx_timeout);
        }

        return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = (struct net_device *)data;
        priv = netdev_priv(dev);

        spin_lock(&priv->rx_lock);
        bcm_enet_refill_rx(dev);
        spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int processed;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;
        processed = 0;

        /* don't scan the ring further than the number of refilled
         * descriptors */
        if (budget > priv->rx_desc_count)
                budget = priv->rx_desc_count;

        do {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;
                int desc_idx;
                u32 len_stat;
                unsigned int len;

                desc_idx = priv->rx_curr_desc;
                desc = &priv->rx_desc_cpu[desc_idx];

                /* make sure we actually read the descriptor status on
                 * each loop iteration */
                rmb();

                len_stat = desc->len_stat;

                /* break if dma ownership belongs to hw */
                if (len_stat & DMADESC_OWNER_MASK)
                        break;

                processed++;
                priv->rx_curr_desc++;
                if (priv->rx_curr_desc == priv->rx_ring_size)
                        priv->rx_curr_desc = 0;
                priv->rx_desc_count--;

                /* if the packet does not have start of packet _and_
                 * end of packet flag set, then just recycle it */
                if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* recycle packet if it's marked as bad */
                if (!priv->enet_is_sw &&
                    unlikely(len_stat & DMADESC_ERR_MASK)) {
                        dev->stats.rx_errors++;

                        if (len_stat & DMADESC_OVSIZE_MASK)
                                dev->stats.rx_length_errors++;
                        if (len_stat & DMADESC_CRC_MASK)
                                dev->stats.rx_crc_errors++;
                        if (len_stat & DMADESC_UNDER_MASK)
                                dev->stats.rx_frame_errors++;
                        if (len_stat & DMADESC_OV_MASK)
                                dev->stats.rx_fifo_errors++;
                        continue;
                }

                /* valid packet */
                skb = priv->rx_skb[desc_idx];
                len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
                /* don't include FCS */
                len -= 4;

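                /* small packet: copy it into a fresh skb and leave the
                 * original rx buffer mapped for reuse, so only a cache
                 * sync is needed instead of a full unmap/remap cycle */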
                if (len < copybreak) {
                        struct sk_buff *nskb;

                        nskb = netdev_alloc_skb_ip_align(dev, len);
                        if (!nskb) {
                                /* forget packet, just rearm desc */
                                dev->stats.rx_dropped++;
                                continue;
                        }

                        dma_sync_single_for_cpu(kdev, desc->address,
                                                len, DMA_FROM_DEVICE);
                        memcpy(nskb->data, skb->data, len);
                        dma_sync_single_for_device(kdev, desc->address,
                                                   len, DMA_FROM_DEVICE);
                        skb = nskb;
                } else {
                        dma_unmap_single(&priv->pdev->dev, desc->address,
                                         priv->rx_skb_size, DMA_FROM_DEVICE);
                        priv->rx_skb[desc_idx] = NULL;
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
                netif_receive_skb(skb);

        } while (--budget > 0);

        if (processed || !priv->rx_desc_count) {
                bcm_enet_refill_rx(dev);

                /* kick rx dma */
                enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
                                 ENETDMAC_CHANCFG_REG(priv->rx_chan));
        }

        return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
        struct bcm_enet_priv *priv;
        int released;

        priv = netdev_priv(dev);
        released = 0;

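        /* force is only set by bcm_enet_stop(), to release every
         * pending buffer regardless of hardware ownership */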
        while (priv->tx_desc_count < priv->tx_ring_size) {
                struct bcm_enet_desc *desc;
                struct sk_buff *skb;

                /* We run in a bh and fight against start_xmit, which
                 * is called with bh disabled */
                spin_lock(&priv->tx_lock);

                desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

                if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
                        spin_unlock(&priv->tx_lock);
                        break;
                }

                /* ensure the other fields of the descriptor were not
                 * read before we checked ownership */
                rmb();

                skb = priv->tx_skb[priv->tx_dirty_desc];
                priv->tx_skb[priv->tx_dirty_desc] = NULL;
                dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                                 DMA_TO_DEVICE);

                priv->tx_dirty_desc++;
                if (priv->tx_dirty_desc == priv->tx_ring_size)
                        priv->tx_dirty_desc = 0;
                priv->tx_desc_count++;

                spin_unlock(&priv->tx_lock);

                if (desc->len_stat & DMADESC_UNDER_MASK)
                        dev->stats.tx_errors++;

                dev_kfree_skb(skb);
                released++;
        }

        if (netif_queue_stopped(dev) && released)
                netif_wake_queue(dev);

        return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
        struct bcm_enet_priv *priv;
        struct net_device *dev;
        int tx_work_done, rx_work_done;

        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;

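        /* rx/tx interrupts were masked in bcm_enet_isr_dma() and stay
         * masked for the whole polling run; they are only unmasked
         * again after napi_complete() below */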
        /* ack interrupts */
        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IR_REG(priv->rx_chan));
        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IR_REG(priv->tx_chan));

        /* reclaim sent skb */
        tx_work_done = bcm_enet_tx_reclaim(dev, 0);

        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);

        if (rx_work_done >= budget || tx_work_done > 0) {
                /* rx/tx queue is not yet empty/clean */
                return rx_work_done;
        }

        /* no more packets in rx/tx queue, remove device from poll
         * queue */
        napi_complete(napi);

        /* restore rx/tx interrupt */
        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IRMASK_REG(priv->rx_chan));
        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IRMASK_REG(priv->tx_chan));

        return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;
        u32 stat;

        dev = dev_id;
        priv = netdev_priv(dev);

        stat = enet_readl(priv, ENET_IR_REG);
        if (!(stat & ENET_IR_MIB))
                return IRQ_NONE;

        /* clear & mask interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, 0, ENET_IRMASK_REG);

        /* read mib registers in workqueue */
        schedule_work(&priv->mib_update_task);

        return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
        struct net_device *dev;
        struct bcm_enet_priv *priv;

        dev = dev_id;
        priv = netdev_priv(dev);

        /* mask rx/tx interrupts */
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct bcm_enet_desc *desc;
        u32 len_stat;
        int ret;

        priv = netdev_priv(dev);

        /* lock against tx reclaim */
        spin_lock(&priv->tx_lock);

        /* make sure the tx hw queue is not full; this should not
         * happen since we stop the queue before that is the case */
        if (unlikely(!priv->tx_desc_count)) {
                netif_stop_queue(dev);
                dev_err(&priv->pdev->dev, "xmit called with no tx desc "
                        "available?\n");
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
        }

        /* pad small packets sent on a switch device */
        if (priv->enet_is_sw && skb->len < 64) {
                int needed = 64 - skb->len;
                char *data;

                if (unlikely(skb_tailroom(skb) < needed)) {
                        struct sk_buff *nskb;

                        nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
                        if (!nskb) {
                                ret = NETDEV_TX_BUSY;
                                goto out_unlock;
                        }
                        dev_kfree_skb(skb);
                        skb = nskb;
                }
                data = skb_put(skb, needed);
                memset(data, 0, needed);
        }

        /* point to the next available desc */
        desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
        priv->tx_skb[priv->tx_curr_desc] = skb;

        /* fill descriptor */
        desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);

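        /* no scatter-gather here: one skb maps to exactly one
         * descriptor, so both the start- and end-of-packet bits (ESOP)
         * are set; APPEND_CRC asks the mac to append the ethernet FCS */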
        len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
        len_stat |= DMADESC_ESOP_MASK |
                DMADESC_APPEND_CRC |
                DMADESC_OWNER_MASK;

        priv->tx_curr_desc++;
        if (priv->tx_curr_desc == priv->tx_ring_size) {
                priv->tx_curr_desc = 0;
                len_stat |= DMADESC_WRAP_MASK;
        }
        priv->tx_desc_count--;

        /* dma might be already polling, make sure we update desc
         * fields in correct order */
        wmb();
        desc->len_stat = len_stat;
        wmb();

        /* kick tx dma */
        enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
                         ENETDMAC_CHANCFG_REG(priv->tx_chan));

        /* stop queue if no more desc available */
        if (!priv->tx_desc_count)
                netif_stop_queue(dev);

        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ret = NETDEV_TX_OK;

out_unlock:
        spin_unlock(&priv->tx_lock);
        return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
        struct bcm_enet_priv *priv;
        struct sockaddr *addr = p;
        u32 val;

        priv = netdev_priv(dev);
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

        /* use perfect match register 0 to store my mac address */
        val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
                (dev->dev_addr[4] << 8) | dev->dev_addr[5];
        enet_writel(priv, val, ENET_PML_REG(0));

        val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        val |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, val, ENET_PMH_REG(0));

        return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct netdev_hw_addr *ha;
        u32 val;
        int i;

        priv = netdev_priv(dev);

        val = enet_readl(priv, ENET_RXCFG_REG);

        if (dev->flags & IFF_PROMISC)
                val |= ENET_RXCFG_PROMISC_MASK;
        else
                val &= ~ENET_RXCFG_PROMISC_MASK;

        /* only 3 perfect match registers left, the first one is used
         * for our own mac address */
        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
                val |= ENET_RXCFG_ALLMCAST_MASK;
        else
                val &= ~ENET_RXCFG_ALLMCAST_MASK;

        /* no need to set perfect match registers if we catch all
         * multicast */
        if (val & ENET_RXCFG_ALLMCAST_MASK) {
                enet_writel(priv, val, ENET_RXCFG_REG);
                return;
        }

        i = 0;
        netdev_for_each_mc_addr(ha, dev) {
                u8 *dmi_addr;
                u32 tmp;

                if (i == 3)
                        break;
                /* update perfect match registers */
                dmi_addr = ha->addr;
                tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
                        (dmi_addr[4] << 8) | dmi_addr[5];
                enet_writel(priv, tmp, ENET_PML_REG(i + 1));

                tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
                tmp |= ENET_PMH_DATAVALID_MASK;
                enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
        }

        for (; i < 3; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i + 1));
                enet_writel(priv, 0, ENET_PMH_REG(i + 1));
        }

        enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
        u32 val;

        val = enet_readl(priv, ENET_TXCTL_REG);
        if (fullduplex)
                val |= ENET_TXCTL_FD_MASK;
        else
                val &= ~ENET_TXCTL_FD_MASK;
        enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
        u32 val;

        /* rx flow control (pause frame handling) */
        val = enet_readl(priv, ENET_RXCFG_REG);
        if (rx_en)
                val |= ENET_RXCFG_ENFLOW_MASK;
        else
                val &= ~ENET_RXCFG_ENFLOW_MASK;
        enet_writel(priv, val, ENET_RXCFG_REG);

        /* tx flow control (pause frame generation) */
        val = enet_dma_readl(priv, ENETDMA_CFG_REG);
        if (tx_en)
                val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        else
                val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
        enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct phy_device *phydev;
        int status_changed;

        priv = netdev_priv(dev);
        phydev = priv->phydev;
        status_changed = 0;

        if (priv->old_link != phydev->link) {
                status_changed = 1;
                priv->old_link = phydev->link;
        }

        /* reflect duplex change in mac configuration */
        if (phydev->link && phydev->duplex != priv->old_duplex) {
                bcm_enet_set_duplex(priv,
                                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
                status_changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        /* enable flow control if the remote advertises it (trust
         * phylib to check that duplex is full) */
        if (phydev->link && phydev->pause != priv->old_pause) {
                int rx_pause_en, tx_pause_en;

                if (phydev->pause) {
                        /* pause was advertised by lpa and us */
                        rx_pause_en = 1;
                        tx_pause_en = 1;
                } else if (!priv->pause_auto) {
                        /* pause setting overridden by user */
                        rx_pause_en = priv->pause_rx;
                        tx_pause_en = priv->pause_tx;
                } else {
                        rx_pause_en = 0;
                        tx_pause_en = 0;
                }

                bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
                status_changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (status_changed) {
                pr_info("%s: link %s", dev->name, phydev->link ?
                        "UP" : "DOWN");
                if (phydev->link)
                        pr_cont(" - %d/%s - flow control %s", phydev->speed,
                               DUPLEX_FULL == phydev->duplex ? "full" : "half",
                               phydev->pause == 1 ? "rx&tx" : "off");

                pr_cont("\n");
        }
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        bcm_enet_set_duplex(priv, priv->force_duplex_full);
        bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
        netif_carrier_on(dev);

        pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
                dev->name,
                priv->force_speed_100 ? 100 : 10,
                priv->force_duplex_full ? "full" : "half",
                priv->pause_rx ? "rx" : "off",
                priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct sockaddr addr;
        struct device *kdev;
        struct phy_device *phydev;
        int i, ret;
        unsigned int size;
        char phy_id[MII_BUS_ID_SIZE + 3];
        void *p;
        u32 val;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        if (priv->has_phy) {
                /* connect to PHY */
                snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                         priv->mii_bus->id, priv->phy_id);

                phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
                                     PHY_INTERFACE_MODE_MII);

                if (IS_ERR(phydev)) {
                        dev_err(kdev, "could not attach to PHY\n");
                        return PTR_ERR(phydev);
                }

                /* mask with MAC supported features */
                phydev->supported &= (SUPPORTED_10baseT_Half |
                                      SUPPORTED_10baseT_Full |
                                      SUPPORTED_100baseT_Half |
                                      SUPPORTED_100baseT_Full |
                                      SUPPORTED_Autoneg |
                                      SUPPORTED_Pause |
                                      SUPPORTED_MII);
                phydev->advertising = phydev->supported;

                if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
                        phydev->advertising |= SUPPORTED_Pause;
                else
                        phydev->advertising &= ~SUPPORTED_Pause;

                dev_info(kdev, "attached PHY at address %d [%s]\n",
                         phydev->addr, phydev->drv->name);

                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
                priv->phydev = phydev;
        }

        /* mask all interrupts and request them */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

        ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
        if (ret)
                goto out_phy_disconnect;

        ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
                          dev->name, dev);
        if (ret)
                goto out_freeirq;

        ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
                          IRQF_DISABLED, dev->name, dev);
        if (ret)
                goto out_freeirq_rx;

        /* initialize perfect match registers */
        for (i = 0; i < 4; i++) {
                enet_writel(priv, 0, ENET_PML_REG(i));
                enet_writel(priv, 0, ENET_PMH_REG(i));
        }

        /* write device mac address */
        memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
        bcm_enet_set_mac_address(dev, &addr);

        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
                               GFP_KERNEL | __GFP_ZERO);
        if (!p) {
                ret = -ENOMEM;
                goto out_freeirq_tx;
        }

        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;

        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
        p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
                               GFP_KERNEL | __GFP_ZERO);
        if (!p) {
                ret = -ENOMEM;
                goto out_free_rx_ring;
        }

        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;

        priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->tx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_ring;
        }

        priv->tx_desc_count = priv->tx_ring_size;
        priv->tx_dirty_desc = 0;
        priv->tx_curr_desc = 0;
        spin_lock_init(&priv->tx_lock);

        /* init & fill rx ring with skbs */
        priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
                               GFP_KERNEL);
        if (!priv->rx_skb) {
                ret = -ENOMEM;
                goto out_free_tx_skb;
        }

        priv->rx_desc_count = 0;
        priv->rx_dirty_desc = 0;
        priv->rx_curr_desc = 0;

        /* initialize flow control buffer allocation */
        enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                        ENETDMA_BUFALLOC_REG(priv->rx_chan));
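        /* the FORCE bit appears to set the channel's buffer allocation
         * count outright (here to zero) instead of adding to it;
         * bcm_enet_refill_rx() below then credits one buffer per
         * descriptor handed back to hardware */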

        if (bcm_enet_refill_rx(dev)) {
                dev_err(kdev, "cannot allocate rx skb queue\n");
                ret = -ENOMEM;
                goto out;
        }

        /* write rx & tx ring addresses */
        enet_dmas_writel(priv, priv->rx_desc_dma,
                         ENETDMAS_RSTART_REG(priv->rx_chan));
        enet_dmas_writel(priv, priv->tx_desc_dma,
                         ENETDMAS_RSTART_REG(priv->tx_chan));

        /* clear remaining state ram for rx & tx channel */
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));

        /* set max rx/tx length */
        enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
        enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

        /* set dma maximum burst len */
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST_REG(priv->rx_chan));
        enet_dmac_writel(priv, priv->dma_maxburst,
                         ENETDMAC_MAXBURST_REG(priv->tx_chan));

        /* set correct transmit fifo watermark */
        enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

        /* set flow control low/high threshold to 1/3 / 2/3 */
        val = priv->rx_ring_size / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
        val = (priv->rx_ring_size * 2) / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

        /* all set, enable mac and interrupts, start dma engine and
         * kick rx dma channel */
        wmb();
        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_ENABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);
        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
        enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
                         ENETDMAC_CHANCFG_REG(priv->rx_chan));

        /* watch "mib counters about to overflow" interrupt */
        enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

        /* watch "packet transferred" interrupt in rx and tx */
        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IR_REG(priv->rx_chan));
        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IR_REG(priv->tx_chan));

        /* make sure we enable napi before rx interrupt */
        napi_enable(&priv->napi);

        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IRMASK_REG(priv->rx_chan));
        enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
                         ENETDMAC_IRMASK_REG(priv->tx_chan));

        if (priv->has_phy)
                phy_start(priv->phydev);
        else
                bcm_enet_adjust_link(dev);

        netif_start_queue(dev);
        return 0;

out:
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }
        kfree(priv->rx_skb);

out_free_tx_skb:
        kfree(priv->tx_skb);

out_free_tx_ring:
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
        free_irq(priv->irq_tx, dev);

out_freeirq_rx:
        free_irq(priv->irq_rx, dev);

out_freeirq:
        free_irq(dev->irq, dev);

out_phy_disconnect:
        /* only disconnect if we actually attached a phy above; on a
         * phy-less device priv->phydev is NULL here */
        if (priv->has_phy)
                phy_disconnect(priv->phydev);

        return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
        int limit;
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        val |= ENET_CTL_DISABLE_MASK;
        enet_writel(priv, val, ENET_CTL_REG);

        limit = 1000;
        do {
                u32 val;

                val = enet_readl(priv, ENET_CTL_REG);
                if (!(val & ENET_CTL_DISABLE_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
        int limit;

        enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan));

        limit = 1000;
        do {
                u32 val;

                val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan));
                if (!(val & ENETDMAC_CHANCFG_EN_MASK))
                        break;
                udelay(1);
        } while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
        struct bcm_enet_priv *priv;
        struct device *kdev;
        int i;

        priv = netdev_priv(dev);
        kdev = &priv->pdev->dev;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
                phy_stop(priv->phydev);
        del_timer_sync(&priv->rx_timeout);

        /* mask all interrupts */
        enet_writel(priv, 0, ENET_IRMASK_REG);
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
        enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));

        /* make sure no mib update is scheduled */
        cancel_work_sync(&priv->mib_update_task);

        /* disable dma & mac */
        bcm_enet_disable_dma(priv, priv->tx_chan);
        bcm_enet_disable_dma(priv, priv->rx_chan);
        bcm_enet_disable_mac(priv);

        /* force reclaim of all tx buffers */
        bcm_enet_tx_reclaim(dev, 1);

        /* free the rx skb ring */
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct bcm_enet_desc *desc;

                if (!priv->rx_skb[i])
                        continue;

                desc = &priv->rx_desc_cpu[i];
                dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
                                 DMA_FROM_DEVICE);
                kfree_skb(priv->rx_skb[i]);
        }

        /* free remaining allocated memory */
        kfree(priv->rx_skb);
        kfree(priv->tx_skb);
        dma_free_coherent(kdev, priv->rx_desc_alloc_size,
                          priv->rx_desc_cpu, priv->rx_desc_dma);
        dma_free_coherent(kdev, priv->tx_desc_alloc_size,
                          priv->tx_desc_cpu, priv->tx_desc_dma);
        free_irq(priv->irq_tx, dev);
        free_irq(priv->irq_rx, dev);
        free_irq(dev->irq, dev);

        /* release phy */
        if (priv->has_phy) {
                phy_disconnect(priv->phydev);
                priv->phydev = NULL;
        }

        return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
        int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),             \
                     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),          \
                     offsetof(struct net_device_stats, m)
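
/* each macro expands to two initializers, filling the sizeof_stat and
 * stat_offset members of struct bcm_enet_stats: GEN_STAT points into
 * bcm_enet_priv (hardware MIB counters), DEV_STAT into the generic
 * net_device_stats */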

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
        { "rx_packets", DEV_STAT(rx_packets), -1 },
        { "tx_packets", DEV_STAT(tx_packets), -1 },
        { "rx_bytes", DEV_STAT(rx_bytes), -1 },
        { "tx_bytes", DEV_STAT(tx_bytes), -1 },
        { "rx_errors", DEV_STAT(rx_errors), -1 },
        { "tx_errors", DEV_STAT(tx_errors), -1 },
        { "rx_dropped", DEV_STAT(rx_dropped), -1 },
        { "tx_dropped", DEV_STAT(tx_dropped), -1 },

        { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
        { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
        { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
        { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
        { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
        { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
        { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
        { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
        { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
        { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
        { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
        { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
        { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
        { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
        { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
        { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
        { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
        { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
        { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
        { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
        { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

        { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
        { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
        { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
        { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
        { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
        { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
        { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
        { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
        { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
        { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
        { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
        { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
        { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
        { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
        { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
        { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
        { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
        { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
        { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
        { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
        { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
        { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN      ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
        ETH_MIB_TX_ALL_PKTS,
        ETH_MIB_RX_ALL_OCTETS,
        ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, bcm_enet_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
        drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
                                        int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return BCM_ENET_STATS_LEN;
        default:
                return -EINVAL;
        }
}

static void bcm_enet_get_strings(struct net_device *netdev,
                                 u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcm_enet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
        int i;

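        /* the hardware MIB counters clear on read (hence the +=
         * accumulation here and the draining of the unused counters
         * below), so each snapshot is folded into the software
         * accumulators instead of overwriting them */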
        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                u32 val;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        continue;

                val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
                p = (char *)priv + s->stat_offset;

                if (s->sizeof_stat == sizeof(u64))
                        *(u64 *)p += val;
                else
                        *(u32 *)p += val;
        }

        /* also empty unused mib counters to make sure mib counter
         * overflow interrupt is cleared */
        for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
                (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
        struct bcm_enet_priv *priv;

        priv = container_of(t, struct bcm_enet_priv, mib_update_task);
        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);
        mutex_unlock(&priv->mib_update_lock);

        /* reenable mib interrupt */
        if (netif_running(priv->net_dev))
                enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcm_enet_priv *priv;
        int i;

        priv = netdev_priv(netdev);

        mutex_lock(&priv->mib_update_lock);
        update_mib_counters(priv);

        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
                const struct bcm_enet_stats *s;
                char *p;

                s = &bcm_enet_gstrings_stats[i];
                if (s->mib_reg == -1)
                        p = (char *)&netdev->stats;
                else
                        p = (char *)priv;
                p += s->stat_offset;
                data[i] = (s->sizeof_stat == sizeof(u64)) ?
                        *(u64 *)p : *(u32 *)p;
        }
        mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return genphy_restart_aneg(priv->phydev);
        }

        return -EOPNOTSUPP;
}

static int bcm_enet_get_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        cmd->maxrxpkt = 0;
        cmd->maxtxpkt = 0;

        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_gset(priv->phydev, cmd);
        } else {
                cmd->autoneg = 0;
                ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
                                            ? SPEED_100 : SPEED_10));
                cmd->duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
                cmd->supported = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
                cmd->advertising = 0;
                cmd->port = PORT_MII;
                cmd->transceiver = XCVR_EXTERNAL;
        }
        return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->has_phy) {
                if (!priv->phydev)
                        return -ENODEV;
                return phy_ethtool_sset(priv->phydev, cmd);
        } else {

                if (cmd->autoneg ||
                    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
                    cmd->port != PORT_MII)
                        return -EINVAL;

                priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
                priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
                return 0;
        }
}

static void bcm_enet_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        /* rx/tx ring is actually only limited by memory */
        ering->rx_max_pending = 8192;
        ering->tx_max_pending = 8192;
        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering)
{
        struct bcm_enet_priv *priv;
        int was_running;

        priv = netdev_priv(dev);

        was_running = 0;
        if (netif_running(dev)) {
                bcm_enet_stop(dev);
                was_running = 1;
        }

        priv->rx_ring_size = ering->rx_pending;
        priv->tx_ring_size = ering->tx_pending;

        if (was_running) {
                int err;

                err = bcm_enet_open(dev);
                if (err)
                        dev_close(dev);
                else
                        bcm_enet_set_multicast_list(dev);
        }
        return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);
        ecmd->autoneg = priv->pause_auto;
        ecmd->rx_pause = priv->pause_rx;
        ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
                                   struct ethtool_pauseparam *ecmd)
{
        struct bcm_enet_priv *priv;

        priv = netdev_priv(dev);

        if (priv->has_phy) {
                if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
                        /* asymmetric pause mode not supported; it is
                         * actually possible, but the integrated PHY
                         * has a read-only asym_pause bit */
1545                         return -EINVAL;
1546                 }
1547         } else {
1548                 /* no pause autoneg on direct mii connection */
1549                 if (ecmd->autoneg)
1550                         return -EINVAL;
1551         }
1552
1553         priv->pause_auto = ecmd->autoneg;
1554         priv->pause_rx = ecmd->rx_pause;
1555         priv->pause_tx = ecmd->tx_pause;
1556
1557         return 0;
1558 }
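
     /* For reference: "ethtool -A eth0 autoneg on rx on tx on" maps to this
      * handler. When pause autoneg is requested, rx and tx pause must match
      * (see the asym_pause limitation above); without a PHY, pause autoneg
      * is rejected outright.
      */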
1559
1560 static const struct ethtool_ops bcm_enet_ethtool_ops = {
1561         .get_strings            = bcm_enet_get_strings,
1562         .get_sset_count         = bcm_enet_get_sset_count,
1563         .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1564         .nway_reset             = bcm_enet_nway_reset,
1565         .get_settings           = bcm_enet_get_settings,
1566         .set_settings           = bcm_enet_set_settings,
1567         .get_drvinfo            = bcm_enet_get_drvinfo,
1568         .get_link               = ethtool_op_get_link,
1569         .get_ringparam          = bcm_enet_get_ringparam,
1570         .set_ringparam          = bcm_enet_set_ringparam,
1571         .get_pauseparam         = bcm_enet_get_pauseparam,
1572         .set_pauseparam         = bcm_enet_set_pauseparam,
1573 };
1574
1575 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1576 {
1577         struct bcm_enet_priv *priv;
1578
1579         priv = netdev_priv(dev);
1580         if (priv->has_phy) {
1581                 if (!priv->phydev)
1582                         return -ENODEV;
1583                 return phy_mii_ioctl(priv->phydev, rq, cmd);
1584         } else {
1585                 struct mii_if_info mii;
1586
1587                 mii.dev = dev;
1588                 mii.mdio_read = bcm_enet_mdio_read_mii;
1589                 mii.mdio_write = bcm_enet_mdio_write_mii;
1590                 mii.phy_id = 0;
1591                 mii.phy_id_mask = 0x3f;
1592                 mii.reg_num_mask = 0x1f;
1593                 return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1594         }
1595 }
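
     /* For reference: this services the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
      * ioctls, i.e. what tools such as mii-tool(8) use to read and write PHY
      * registers.
      */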
1596
1597 /*
1598  * calculate actual hardware mtu
1599  */
1600 static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
1601 {
1602         int actual_mtu;
1603
1604         actual_mtu = mtu;
1605
1606         /* add ethernet header + vlan tag size */
1607         actual_mtu += VLAN_ETH_HLEN;
1608
1609         if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
1610                 return -EINVAL;
1611
1612         /*
1613          * set the maximum frame size above which the overflow mark is
1614          * set in the descriptor; note that this does not prevent
1615          * reception of bigger frames, they are simply split into
1616          * multiple buffers
1617          */
1618         priv->hw_mtu = actual_mtu;
1619
1620         /*
1621          * align the rx buffer size to the dma burst length, and account
1622          * for the FCS since the hardware appends it
1623          */
1624         priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1625                                   priv->dma_maxburst * 4);
1626         return 0;
1627 }
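
     /* Worked example of the math above: with the default 1500-byte MTU,
      * actual_mtu = 1500 + VLAN_ETH_HLEN (18) = 1518, and rx_skb_size =
      * ALIGN(1518 + ETH_FCS_LEN (4), burst bytes); e.g. with a dma_maxburst
      * of 16 words (64 bytes) that is ALIGN(1522, 64) = 1536.
      */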
1628
1629 /*
1630  * adjust mtu, can't be called while device is running
1631  */
1632 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1633 {
1634         int ret;
1635
1636         if (netif_running(dev))
1637                 return -EBUSY;
1638
1639         ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
1640         if (ret)
1641                 return ret;
1642         dev->mtu = new_mtu;
1643         return 0;
1644 }
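
     /* For reference: because of the -EBUSY guard above, the MTU has to be
      * changed with the interface down, e.g. (name and size are examples):
      *   ip link set dev eth0 down
      *   ip link set dev eth0 mtu 2000
      *   ip link set dev eth0 up
      */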
1645
1646 /*
1647  * preinit hardware to allow mii operation while device is down
1648  */
1649 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1650 {
1651         u32 val;
1652         int limit;
1653
1654         /* make sure mac is disabled */
1655         bcm_enet_disable_mac(priv);
1656
1657         /* soft reset mac */
1658         val = ENET_CTL_SRESET_MASK;
1659         enet_writel(priv, val, ENET_CTL_REG);
1660         wmb();
1661
1662         limit = 1000;
1663         do {
1664                 val = enet_readl(priv, ENET_CTL_REG);
1665                 if (!(val & ENET_CTL_SRESET_MASK))
1666                         break;
1667                 udelay(1);
1668         } while (limit--);
1669
1670         /* select correct mii interface */
1671         val = enet_readl(priv, ENET_CTL_REG);
1672         if (priv->use_external_mii)
1673                 val |= ENET_CTL_EPHYSEL_MASK;
1674         else
1675                 val &= ~ENET_CTL_EPHYSEL_MASK;
1676         enet_writel(priv, val, ENET_CTL_REG);
1677
1678         /* turn on mdc clock */
1679         enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1680                     ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1681
1682         /* set mib counters to self-clear when read */
1683         val = enet_readl(priv, ENET_MIBCTL_REG);
1684         val |= ENET_MIBCTL_RDCLEAR_MASK;
1685         enet_writel(priv, val, ENET_MIBCTL_REG);
1686 }
1687
1688 static const struct net_device_ops bcm_enet_ops = {
1689         .ndo_open               = bcm_enet_open,
1690         .ndo_stop               = bcm_enet_stop,
1691         .ndo_start_xmit         = bcm_enet_start_xmit,
1692         .ndo_set_mac_address    = bcm_enet_set_mac_address,
1693         .ndo_set_rx_mode        = bcm_enet_set_multicast_list,
1694         .ndo_do_ioctl           = bcm_enet_ioctl,
1695         .ndo_change_mtu         = bcm_enet_change_mtu,
1696 #ifdef CONFIG_NET_POLL_CONTROLLER
1697         .ndo_poll_controller = bcm_enet_netpoll,
1698 #endif
1699 };
1700
1701 /*
1702  * allocate netdevice, request register memory and register device.
1703  */
1704 static int bcm_enet_probe(struct platform_device *pdev)
1705 {
1706         struct bcm_enet_priv *priv;
1707         struct net_device *dev;
1708         struct bcm63xx_enet_platform_data *pd;
1709         struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1710         struct mii_bus *bus;
1711         const char *clk_name;
1712         int i, ret;
1713
1714         /* stop if the shared driver failed; assume driver->probe is
1715          * called in the same order the devices were registered */
1716         if (!bcm_enet_shared_base[0])
1717                 return -ENODEV;
1718
1719         res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1720         res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1721         res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1722         res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1723         if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1724                 return -ENODEV;
1725
1726         ret = 0;
1727         dev = alloc_etherdev(sizeof(*priv));
1728         if (!dev)
1729                 return -ENOMEM;
1730         priv = netdev_priv(dev);
1731
1732         priv->enet_is_sw = false;
1733         priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1734
1735         ret = compute_hw_mtu(priv, dev->mtu);
1736         if (ret)
1737                 goto out;
1738
1739         priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
1740         if (priv->base == NULL) {
1741                 ret = -ENOMEM;
1742                 goto out;
1743         }
1744
1745         dev->irq = priv->irq = res_irq->start;
1746         priv->irq_rx = res_irq_rx->start;
1747         priv->irq_tx = res_irq_tx->start;
1748         priv->mac_id = pdev->id;
1749
1750         /* get rx & tx dma channel id for this mac */
1751         if (priv->mac_id == 0) {
1752                 priv->rx_chan = 0;
1753                 priv->tx_chan = 1;
1754                 clk_name = "enet0";
1755         } else {
1756                 priv->rx_chan = 2;
1757                 priv->tx_chan = 3;
1758                 clk_name = "enet1";
1759         }
1760
1761         priv->mac_clk = clk_get(&pdev->dev, clk_name);
1762         if (IS_ERR(priv->mac_clk)) {
1763                 ret = PTR_ERR(priv->mac_clk);
1764                 goto out;
1765         }
1766         clk_prepare_enable(priv->mac_clk);
1767
1768         /* initialize default and fetch platform data */
1769         priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1770         priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1771
1772         pd = pdev->dev.platform_data;
1773         if (pd) {
1774                 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1775                 priv->has_phy = pd->has_phy;
1776                 priv->phy_id = pd->phy_id;
1777                 priv->has_phy_interrupt = pd->has_phy_interrupt;
1778                 priv->phy_interrupt = pd->phy_interrupt;
1779                 priv->use_external_mii = !pd->use_internal_phy;
1780                 priv->pause_auto = pd->pause_auto;
1781                 priv->pause_rx = pd->pause_rx;
1782                 priv->pause_tx = pd->pause_tx;
1783                 priv->force_duplex_full = pd->force_duplex_full;
1784                 priv->force_speed_100 = pd->force_speed_100;
1785         }
1786
1787         if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1788                 /* using internal PHY, enable clock */
1789                 priv->phy_clk = clk_get(&pdev->dev, "ephy");
1790                 if (IS_ERR(priv->phy_clk)) {
1791                         ret = PTR_ERR(priv->phy_clk);
1792                         priv->phy_clk = NULL;
1793                         goto out_put_clk_mac;
1794                 }
1795                 clk_prepare_enable(priv->phy_clk);
1796         }
1797
1798         /* do minimal hardware init to be able to probe mii bus */
1799         bcm_enet_hw_preinit(priv);
1800
1801         /* MII bus registration */
1802         if (priv->has_phy) {
1803
1804                 priv->mii_bus = mdiobus_alloc();
1805                 if (!priv->mii_bus) {
1806                         ret = -ENOMEM;
1807                         goto out_uninit_hw;
1808                 }
1809
1810                 bus = priv->mii_bus;
1811                 bus->name = "bcm63xx_enet MII bus";
1812                 bus->parent = &pdev->dev;
1813                 bus->priv = priv;
1814                 bus->read = bcm_enet_mdio_read_phylib;
1815                 bus->write = bcm_enet_mdio_write_phylib;
1816                 sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
1817
1818                 /* only probe the bus where we think the PHY is,
1819                  * because the mdio read operation returns 0 instead
1820                  * of 0xffff when no slave is present */
1821                 bus->phy_mask = ~(1 << priv->phy_id);
1822
1823                 bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
1824                                         GFP_KERNEL);
1825                 if (!bus->irq) {
1826                         ret = -ENOMEM;
1827                         goto out_free_mdio;
1828                 }
1829
1830                 if (priv->has_phy_interrupt)
1831                         bus->irq[priv->phy_id] = priv->phy_interrupt;
1832                 else
1833                         bus->irq[priv->phy_id] = PHY_POLL;
1834
1835                 ret = mdiobus_register(bus);
1836                 if (ret) {
1837                         dev_err(&pdev->dev, "unable to register mdio bus\n");
1838                         goto out_free_mdio;
1839                 }
1840         } else {
1841                 /* run platform code to initialize PHY device */
1842                 if (pd && pd->mii_config &&
1843                     pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1844                                    bcm_enet_mdio_write_mii)) {
1845                         dev_err(&pdev->dev, "unable to configure mdio bus\n");
1846                         ret = -ENODEV;
1847                         goto out_uninit_hw;
1848                 }
1849         }
1850
1851         spin_lock_init(&priv->rx_lock);
1852
1853         /* init rx timeout (used to retry rx refill on oom) */
1854         init_timer(&priv->rx_timeout);
1855         priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1856         priv->rx_timeout.data = (unsigned long)dev;
1857
1858         /* init the mib update lock & work */
1859         mutex_init(&priv->mib_update_lock);
1860         INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1861
1862         /* zero mib counters */
1863         for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1864                 enet_writel(priv, 0, ENET_MIB_REG(i));
1865
1866         /* register netdevice */
1867         dev->netdev_ops = &bcm_enet_ops;
1868         netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1869
1870         SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1871         SET_NETDEV_DEV(dev, &pdev->dev);
1872
1873         ret = register_netdev(dev);
1874         if (ret)
1875                 goto out_unregister_mdio;
1876
1877         netif_carrier_off(dev);
1878         platform_set_drvdata(pdev, dev);
1879         priv->pdev = pdev;
1880         priv->net_dev = dev;
1881
1882         return 0;
1883
1884 out_unregister_mdio:
1885         if (priv->mii_bus)
1886                 mdiobus_unregister(priv->mii_bus);
1887
1888 out_free_mdio:
1889         if (priv->mii_bus)
1890                 mdiobus_free(priv->mii_bus);
1891
1892 out_uninit_hw:
1893         /* turn off mdc clock */
1894         enet_writel(priv, 0, ENET_MIISC_REG);
1895         if (priv->phy_clk) {
1896                 clk_disable_unprepare(priv->phy_clk);
1897                 clk_put(priv->phy_clk);
1898         }
1899
1900 out_put_clk_mac:
1901         clk_disable_unprepare(priv->mac_clk);
1902         clk_put(priv->mac_clk);
1903 out:
1904         free_netdev(dev);
1905         return ret;
1906 }
1907
1909 /*
1910  * exit func, stops hardware and unregisters netdevice
1911  */
1912 static int bcm_enet_remove(struct platform_device *pdev)
1913 {
1914         struct bcm_enet_priv *priv;
1915         struct net_device *dev;
1916
1917         /* stop netdevice */
1918         dev = platform_get_drvdata(pdev);
1919         priv = netdev_priv(dev);
1920         unregister_netdev(dev);
1921
1922         /* turn off mdc clock */
1923         enet_writel(priv, 0, ENET_MIISC_REG);
1924
1925         if (priv->has_phy) {
1926                 mdiobus_unregister(priv->mii_bus);
1927                 mdiobus_free(priv->mii_bus);
1928         } else {
1929                 struct bcm63xx_enet_platform_data *pd;
1930
1931                 pd = pdev->dev.platform_data;
1932                 if (pd && pd->mii_config)
1933                         pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1934                                        bcm_enet_mdio_write_mii);
1935         }
1936
1937         /* disable hw block clocks */
1938         if (priv->phy_clk) {
1939                 clk_disable_unprepare(priv->phy_clk);
1940                 clk_put(priv->phy_clk);
1941         }
1942         clk_disable_unprepare(priv->mac_clk);
1943         clk_put(priv->mac_clk);
1944
1945         free_netdev(dev);
1946         return 0;
1947 }
1948
1949 struct platform_driver bcm63xx_enet_driver = {
1950         .probe  = bcm_enet_probe,
1951         .remove = bcm_enet_remove,
1952         .driver = {
1953                 .name   = "bcm63xx_enet",
1954                 .owner  = THIS_MODULE,
1955         },
1956 };
1957
1958 /*
1959  * switch mii access callbacks
1960  */
1961 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1962                                 int ext, int phy_id, int location)
1963 {
1964         u32 reg;
1965         int ret;
1966
1967         spin_lock_bh(&priv->enetsw_mdio_lock);
1968         enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1969
1970         reg = ENETSW_MDIOC_RD_MASK |
1971                 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1972                 (location << ENETSW_MDIOC_REG_SHIFT);
1973
1974         if (ext)
1975                 reg |= ENETSW_MDIOC_EXT_MASK;
1976
1977         enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1978         udelay(50);
1979         ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1980         spin_unlock_bh(&priv->enetsw_mdio_lock);
1981         return ret;
1982 }
1983
1984 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1985                                  int ext, int phy_id, int location,
1986                                  uint16_t data)
1987 {
1988         u32 reg;
1989
1990         spin_lock_bh(&priv->enetsw_mdio_lock);
1991         enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1992
1993         reg = ENETSW_MDIOC_WR_MASK |
1994                 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1995                 (location << ENETSW_MDIOC_REG_SHIFT);
1996
1997         if (ext)
1998                 reg |= ENETSW_MDIOC_EXT_MASK;
1999
2000         reg |= data;
2001
2002         enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2003         udelay(50);
2004         spin_unlock_bh(&priv->enetsw_mdio_lock);
2005 }
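
     /* A minimal usage sketch for the two accessors above (illustration
      * only, not part of the driver; the helper name is made up). MII_BMSR
      * and BMSR_LSTATUS come from <linux/mii.h>.
      */
     static inline int bcmenet_sw_example_link_up(struct bcm_enet_priv *priv,
                                                  int ext, int phy_id)
     {
             int bmsr;

             /* BMSR link status is latched low, read twice to get the
              * current state */
             bmsr = bcmenet_sw_mdio_read(priv, ext, phy_id, MII_BMSR);
             bmsr = bcmenet_sw_mdio_read(priv, ext, phy_id, MII_BMSR);

             return bmsr != 0xffff && (bmsr & BMSR_LSTATUS);
     }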
2006
2007 static inline int bcm_enet_port_is_rgmii(int portid)
2008 {
2009         return portid >= ENETSW_RGMII_PORT0;
2010 }
2011
2012 /*
2013  * enet sw PHY polling
2014  */
2015 static void swphy_poll_timer(unsigned long data)
2016 {
2017         struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
2018         unsigned int i;
2019
2020         for (i = 0; i < priv->num_ports; i++) {
2021                 struct bcm63xx_enetsw_port *port;
2022                 int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
2023                 int external_phy = bcm_enet_port_is_rgmii(i);
2024                 u8 override;
2025
2026                 port = &priv->used_ports[i];
2027                 if (!port->used)
2028                         continue;
2029
2030                 if (port->bypass_link)
2031                         continue;
2032
2033                 /* read twice, the BMSR link status bit is latched low */
2034                 for (j = 0; j < 2; j++)
2035                         val = bcmenet_sw_mdio_read(priv, external_phy,
2036                                                    port->phy_id, MII_BMSR);
2037
2038                 if (val == 0xffff)
2039                         continue;
2040
2041                 up = (val & BMSR_LSTATUS) ? 1 : 0;
2042                 if (!(up ^ priv->sw_port_link[i]))
2043                         continue;
2044
2045                 priv->sw_port_link[i] = up;
2046
2047                 /* link changed */
2048                 if (!up) {
2049                         dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2050                                  port->name);
2051                         enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2052                                       ENETSW_PORTOV_REG(i));
2053                         enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2054                                       ENETSW_PTCTRL_TXDIS_MASK,
2055                                       ENETSW_PTCTRL_REG(i));
2056                         continue;
2057                 }
2058
2059                 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2060                                                  port->phy_id, MII_ADVERTISE);
2061
2062                 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2063                                            MII_LPA);
2064
2065                 lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2066                                             MII_STAT1000);
2067
2068                 /* figure out media and duplex from advertise and LPA values */
2069                 media = mii_nway_result(lpa & advertise);
2070                 duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2071                 if (lpa2 & LPA_1000FULL)
2072                         duplex = 1;
2073
2074                 if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
2075                         speed = 1000;
2076                 else {
2077                         if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2078                                 speed = 100;
2079                         else
2080                                 speed = 10;
2081                 }
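                     /* e.g. if both link partners advertise 100BASE-TX full
                      * duplex, the common set contains ADVERTISE_100FULL and
                      * mii_nway_result() resolves it, giving speed 100 and
                      * duplex 1; gigabit is resolved separately through
                      * MII_STAT1000 since 1000BASE-T abilities live in their
                      * own register pair. */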
2082
2083                 dev_info(&priv->pdev->dev,
2084                          "link UP on %s, %dMbps, %s-duplex\n",
2085                          port->name, speed, duplex ? "full" : "half");
2086
2087                 override = ENETSW_PORTOV_ENABLE_MASK |
2088                         ENETSW_PORTOV_LINKUP_MASK;
2089
2090                 if (speed == 1000)
2091                         override |= ENETSW_IMPOV_1000_MASK;
2092                 else if (speed == 100)
2093                         override |= ENETSW_IMPOV_100_MASK;
2094                 if (duplex)
2095                         override |= ENETSW_IMPOV_FDX_MASK;
2096
2097                 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2098                 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2099         }
2100
2101         priv->swphy_poll.expires = jiffies + HZ;
2102         add_timer(&priv->swphy_poll);
2103 }
2104
2105 /*
2106  * open callback, allocate dma rings & buffers and start rx operation
2107  */
2108 static int bcm_enetsw_open(struct net_device *dev)
2109 {
2110         struct bcm_enet_priv *priv;
2111         struct device *kdev;
2112         int i, ret;
2113         unsigned int size;
2114         void *p;
2115         u32 val;
2116
2117         priv = netdev_priv(dev);
2118         kdev = &priv->pdev->dev;
2119
2120         /* mask all interrupts and request them */
2121         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
2122         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
2123
2124         ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2125                           IRQF_DISABLED, dev->name, dev);
2126         if (ret)
2127                 goto out_freeirq;
2128
2129         if (priv->irq_tx != -1) {
2130                 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2131                                   IRQF_DISABLED, dev->name, dev);
2132                 if (ret)
2133                         goto out_freeirq_rx;
2134         }
2135
2136         /* allocate rx dma ring */
2137         size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2138         p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2139         if (!p) {
2140                 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2141                 ret = -ENOMEM;
2142                 goto out_freeirq_tx;
2143         }
2144
2145         memset(p, 0, size);
2146         priv->rx_desc_alloc_size = size;
2147         priv->rx_desc_cpu = p;
2148
2149         /* allocate tx dma ring */
2150         size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2151         p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2152         if (!p) {
2153                 dev_err(kdev, "cannot allocate tx ring\n");
2154                 ret = -ENOMEM;
2155                 goto out_free_rx_ring;
2156         }
2157
2158         memset(p, 0, size);
2159         priv->tx_desc_alloc_size = size;
2160         priv->tx_desc_cpu = p;
2161
2162         priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
2163                                GFP_KERNEL);
2164         if (!priv->tx_skb) {
2165                 dev_err(kdev, "cannot allocate tx skb queue\n");
2166                 ret = -ENOMEM;
2167                 goto out_free_tx_ring;
2168         }
2169
2170         priv->tx_desc_count = priv->tx_ring_size;
2171         priv->tx_dirty_desc = 0;
2172         priv->tx_curr_desc = 0;
2173         spin_lock_init(&priv->tx_lock);
2174
2175         /* init & fill rx ring with skbs */
2176         priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
2177                                GFP_KERNEL);
2178         if (!priv->rx_skb) {
2179                 dev_err(kdev, "cannot allocate rx skb queue\n");
2180                 ret = -ENOMEM;
2181                 goto out_free_tx_skb;
2182         }
2183
2184         priv->rx_desc_count = 0;
2185         priv->rx_dirty_desc = 0;
2186         priv->rx_curr_desc = 0;
2187
2188         /* disable all ports */
2189         for (i = 0; i < priv->num_ports; i++) {
2190                 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2191                               ENETSW_PORTOV_REG(i));
2192                 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2193                               ENETSW_PTCTRL_TXDIS_MASK,
2194                               ENETSW_PTCTRL_REG(i));
2195
2196                 priv->sw_port_link[i] = 0;
2197         }
2198
2199         /* reset mib */
2200         val = enetsw_readb(priv, ENETSW_GMCR_REG);
2201         val |= ENETSW_GMCR_RST_MIB_MASK;
2202         enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2203         mdelay(1);
2204         val &= ~ENETSW_GMCR_RST_MIB_MASK;
2205         enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2206         mdelay(1);
2207
2208         /* force CPU port state */
2209         val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2210         val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2211         enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2212
2213         /* enable switch forward engine */
2214         val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2215         val |= ENETSW_SWMODE_FWD_EN_MASK;
2216         enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2217
2218         /* enable jumbo on all ports */
2219         enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2220         enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2221
2222         /* initialize flow control buffer allocation */
2223         enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2224                         ENETDMA_BUFALLOC_REG(priv->rx_chan));
2225
2226         if (bcm_enet_refill_rx(dev)) {
2227                 dev_err(kdev, "cannot refill rx ring\n");
2228                 ret = -ENOMEM;
2229                 goto out;
2230         }
2231
2232         /* write rx & tx ring addresses */
2233         enet_dmas_writel(priv, priv->rx_desc_dma,
2234                          ENETDMAS_RSTART_REG(priv->rx_chan));
2235         enet_dmas_writel(priv, priv->tx_desc_dma,
2236                          ENETDMAS_RSTART_REG(priv->tx_chan));
2237
2238         /* clear remaining state ram for rx & tx channel */
2239         enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
2240         enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
2241         enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
2242         enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
2243         enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
2244         enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
2245
2246         /* set dma maximum burst len */
2247         enet_dmac_writel(priv, priv->dma_maxburst,
2248                          ENETDMAC_MAXBURST_REG(priv->rx_chan));
2249         enet_dmac_writel(priv, priv->dma_maxburst,
2250                          ENETDMAC_MAXBURST_REG(priv->tx_chan));
2251
2252         /* set flow control low/high threshold to 1/3 / 2/3 */
2253         val = priv->rx_ring_size / 3;
2254         enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2255         val = (priv->rx_ring_size * 2) / 3;
2256         enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
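             /* e.g. with a 64-descriptor rx ring the thresholds come out to
              * 21 and 42 descriptors; they are recomputed from the ring size
              * on each open, so they scale with ethtool -G */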
2257
2258         /* all set, enable the dma engine, kick the rx channel and
2259          * unmask the "packet transferred" interrupts
2260          */
2261         wmb();
2262         enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2263         enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2264                          ENETDMAC_CHANCFG_REG(priv->rx_chan));
2265
2266         /* watch "packet transferred" interrupt in rx and tx */
2267         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2268                          ENETDMAC_IR_REG(priv->rx_chan));
2269         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2270                          ENETDMAC_IR_REG(priv->tx_chan));
2271
2272         /* make sure we enable napi before the rx interrupt */
2273         napi_enable(&priv->napi);
2274
2275         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2276                          ENETDMAC_IRMASK_REG(priv->rx_chan));
2277         enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2278                          ENETDMAC_IRMASK_REG(priv->tx_chan));
2279
2280         netif_carrier_on(dev);
2281         netif_start_queue(dev);
2282
2283         /* apply override config for bypass_link ports here. */
2284         for (i = 0; i < priv->num_ports; i++) {
2285                 struct bcm63xx_enetsw_port *port;
2286                 u8 override;
2287                 port = &priv->used_ports[i];
2288                 if (!port->used)
2289                         continue;
2290
2291                 if (!port->bypass_link)
2292                         continue;
2293
2294                 override = ENETSW_PORTOV_ENABLE_MASK |
2295                         ENETSW_PORTOV_LINKUP_MASK;
2296
2297                 switch (port->force_speed) {
2298                 case 1000:
2299                         override |= ENETSW_IMPOV_1000_MASK;
2300                         break;
2301                 case 100:
2302                         override |= ENETSW_IMPOV_100_MASK;
2303                         break;
2304                 case 10:
2305                         break;
2306                 default:
2307                         pr_warn("invalid forced speed on port %s: assume 10\n",
2308                                port->name);
2309                         break;
2310                 }
2311
2312                 if (port->force_duplex_full)
2313                         override |= ENETSW_IMPOV_FDX_MASK;
2314
2316                 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2317                 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2318         }
2319
2320         /* start phy polling timer */
2321         init_timer(&priv->swphy_poll);
2322         priv->swphy_poll.function = swphy_poll_timer;
2323         priv->swphy_poll.data = (unsigned long)priv;
2324         priv->swphy_poll.expires = jiffies;
2325         add_timer(&priv->swphy_poll);
2326         return 0;
2327
2328 out:
2329         for (i = 0; i < priv->rx_ring_size; i++) {
2330                 struct bcm_enet_desc *desc;
2331
2332                 if (!priv->rx_skb[i])
2333                         continue;
2334
2335                 desc = &priv->rx_desc_cpu[i];
2336                 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2337                                  DMA_FROM_DEVICE);
2338                 kfree_skb(priv->rx_skb[i]);
2339         }
2340         kfree(priv->rx_skb);
2341
2342 out_free_tx_skb:
2343         kfree(priv->tx_skb);
2344
2345 out_free_tx_ring:
2346         dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2347                           priv->tx_desc_cpu, priv->tx_desc_dma);
2348
2349 out_free_rx_ring:
2350         dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2351                           priv->rx_desc_cpu, priv->rx_desc_dma);
2352
2353 out_freeirq_tx:
2354         if (priv->irq_tx != -1)
2355                 free_irq(priv->irq_tx, dev);
2356
2357 out_freeirq_rx:
2358         free_irq(priv->irq_rx, dev);
2359
2360 out_freeirq:
2361         return ret;
2362 }
2363
2364 /* stop callback */
2365 static int bcm_enetsw_stop(struct net_device *dev)
2366 {
2367         struct bcm_enet_priv *priv;
2368         struct device *kdev;
2369         int i;
2370
2371         priv = netdev_priv(dev);
2372         kdev = &priv->pdev->dev;
2373
2374         del_timer_sync(&priv->swphy_poll);
2375         netif_stop_queue(dev);
2376         napi_disable(&priv->napi);
2377         del_timer_sync(&priv->rx_timeout);
2378
2379         /* mask all interrupts */
2380         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
2381         enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
2382
2383         /* disable dma & mac */
2384         bcm_enet_disable_dma(priv, priv->tx_chan);
2385         bcm_enet_disable_dma(priv, priv->rx_chan);
2386
2387         /* force reclaim of all tx buffers */
2388         bcm_enet_tx_reclaim(dev, 1);
2389
2390         /* free the rx skb ring */
2391         for (i = 0; i < priv->rx_ring_size; i++) {
2392                 struct bcm_enet_desc *desc;
2393
2394                 if (!priv->rx_skb[i])
2395                         continue;
2396
2397                 desc = &priv->rx_desc_cpu[i];
2398                 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2399                                  DMA_FROM_DEVICE);
2400                 kfree_skb(priv->rx_skb[i]);
2401         }
2402
2403         /* free remaining allocated memory */
2404         kfree(priv->rx_skb);
2405         kfree(priv->tx_skb);
2406         dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2407                           priv->rx_desc_cpu, priv->rx_desc_dma);
2408         dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2409                           priv->tx_desc_cpu, priv->tx_desc_dma);
2410         if (priv->irq_tx != -1)
2411                 free_irq(priv->irq_tx, dev);
2412         free_irq(priv->irq_rx, dev);
2413
2414         return 0;
2415 }
2416
2417 /* try to sort out phy external status by walking the used_ports
2418  * array in the bcm_enet_priv structure. in case the phy address is
2419  * not assigned to any physical port on the switch, assume it is
2420  * external (and yell at the user).
2421  */
2422 static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2423 {
2424         int i;
2425
2426         for (i = 0; i < priv->num_ports; ++i) {
2427                 if (!priv->used_ports[i].used)
2428                         continue;
2429                 if (priv->used_ports[i].phy_id == phy_id)
2430                         return bcm_enet_port_is_rgmii(i);
2431         }
2432
2433         printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2434                     phy_id);
2435         return 1;
2436 }
2437
2438 /* can't use bcmenet_sw_mdio_read directly as we need to sort out
2439  * external/internal status of the given phy_id first.
2440  */
2441 static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2442                                     int location)
2443 {
2444         struct bcm_enet_priv *priv;
2445
2446         priv = netdev_priv(dev);
2447         return bcmenet_sw_mdio_read(priv,
2448                                     bcm_enetsw_phy_is_external(priv, phy_id),
2449                                     phy_id, location);
2450 }
2451
2452 /* can't use bcmenet_sw_mdio_write directly as we need to sort out
2453  * external/internal status of the given phy_id first.
2454  */
2455 static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2456                                       int location,
2457                                       int val)
2458 {
2459         struct bcm_enet_priv *priv;
2460
2461         priv = netdev_priv(dev);
2462         bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2463                               phy_id, location, val);
2464 }
2465
2466 static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2467 {
2468         struct mii_if_info mii;
2469
2470         mii.dev = dev;
2471         mii.mdio_read = bcm_enetsw_mii_mdio_read;
2472         mii.mdio_write = bcm_enetsw_mii_mdio_write;
2473         mii.phy_id = 0;
2474         mii.phy_id_mask = 0x3f;
2475         mii.reg_num_mask = 0x1f;
2476         return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2478 }
2479
2480 static const struct net_device_ops bcm_enetsw_ops = {
2481         .ndo_open               = bcm_enetsw_open,
2482         .ndo_stop               = bcm_enetsw_stop,
2483         .ndo_start_xmit         = bcm_enet_start_xmit,
2484         .ndo_change_mtu         = bcm_enet_change_mtu,
2485         .ndo_do_ioctl           = bcm_enetsw_ioctl,
2486 };
2487
2489 static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2490         { "rx_packets", DEV_STAT(rx_packets), -1 },
2491         { "tx_packets", DEV_STAT(tx_packets), -1 },
2492         { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2493         { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2494         { "rx_errors", DEV_STAT(rx_errors), -1 },
2495         { "tx_errors", DEV_STAT(tx_errors), -1 },
2496         { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2497         { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2498
2499         { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2500         { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2501         { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2502         { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2503         { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2504         { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2505         { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2506         { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2507         { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2508         { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2509           ETHSW_MIB_RX_1024_1522 },
2510         { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2511           ETHSW_MIB_RX_1523_2047 },
2512         { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2513           ETHSW_MIB_RX_2048_4095 },
2514         { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2515           ETHSW_MIB_RX_4096_8191 },
2516         { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2517           ETHSW_MIB_RX_8192_9728 },
2518         { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2519         { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2520         { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2521         { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2522         { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2523
2524         { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2525         { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2526         { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2527         { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2528         { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2529         { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2530
2531 };
2532
2533 #define BCM_ENETSW_STATS_LEN    \
2534         ARRAY_SIZE(bcm_enetsw_gstrings_stats)
2535
2536 static void bcm_enetsw_get_strings(struct net_device *netdev,
2537                                    u32 stringset, u8 *data)
2538 {
2539         int i;
2540
2541         switch (stringset) {
2542         case ETH_SS_STATS:
2543                 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2544                         memcpy(data + i * ETH_GSTRING_LEN,
2545                                bcm_enetsw_gstrings_stats[i].stat_string,
2546                                ETH_GSTRING_LEN);
2547                 }
2548                 break;
2549         }
2550 }
2551
2552 static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2553                                      int string_set)
2554 {
2555         switch (string_set) {
2556         case ETH_SS_STATS:
2557                 return BCM_ENETSW_STATS_LEN;
2558         default:
2559                 return -EINVAL;
2560         }
2561 }
2562
2563 static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2564                                    struct ethtool_drvinfo *drvinfo)
2565 {
2566         strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
2567         strlcpy(drvinfo->version, bcm_enet_driver_version, sizeof(drvinfo->version));
2568         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2569         strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2570         drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
2571 }
2572
2573 static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2574                                          struct ethtool_stats *stats,
2575                                          u64 *data)
2576 {
2577         struct bcm_enet_priv *priv;
2578         int i;
2579
2580         priv = netdev_priv(netdev);
2581
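             /* first pass: latch the hardware MIB counters into the mib
              * shadow copy held in priv */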
2582         for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2583                 const struct bcm_enet_stats *s;
2584                 u32 lo, hi;
2585                 char *p;
2586                 int reg;
2587
2588                 s = &bcm_enetsw_gstrings_stats[i];
2589
2590                 reg = s->mib_reg;
2591                 if (reg == -1)
2592                         continue;
2593
2594                 lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2595                 p = (char *)priv + s->stat_offset;
2596
2597                 if (s->sizeof_stat == sizeof(u64)) {
2598                         hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2599                         *(u64 *)p = ((u64)hi << 32 | lo);
2600                 } else {
2601                         *(u32 *)p = lo;
2602                 }
2603         }
2604
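             /* second pass: report from the shadow copy, or from the netdev
              * stats for the software counters (mib_reg == -1) */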
2605         for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2606                 const struct bcm_enet_stats *s;
2607                 char *p;
2608
2609                 s = &bcm_enetsw_gstrings_stats[i];
2610
2611                 if (s->mib_reg == -1)
2612                         p = (char *)&netdev->stats + s->stat_offset;
2613                 else
2614                         p = (char *)priv + s->stat_offset;
2615
2616                 data[i] = (s->sizeof_stat == sizeof(u64)) ?
2617                         *(u64 *)p : *(u32 *)p;
2618         }
2619 }
2620
2621 static void bcm_enetsw_get_ringparam(struct net_device *dev,
2622                                      struct ethtool_ringparam *ering)
2623 {
2624         struct bcm_enet_priv *priv;
2625
2626         priv = netdev_priv(dev);
2627
2628         /* rx/tx ring is actually only limited by memory */
2629         ering->rx_max_pending = 8192;
2630         ering->tx_max_pending = 8192;
2631         ering->rx_mini_max_pending = 0;
2632         ering->rx_jumbo_max_pending = 0;
2633         ering->rx_pending = priv->rx_ring_size;
2634         ering->tx_pending = priv->tx_ring_size;
2635 }
2636
2637 static int bcm_enetsw_set_ringparam(struct net_device *dev,
2638                                     struct ethtool_ringparam *ering)
2639 {
2640         struct bcm_enet_priv *priv;
2641         int was_running;
2642
2643         priv = netdev_priv(dev);
2644
2645         was_running = 0;
2646         if (netif_running(dev)) {
2647                 bcm_enetsw_stop(dev);
2648                 was_running = 1;
2649         }
2650
2651         priv->rx_ring_size = ering->rx_pending;
2652         priv->tx_ring_size = ering->tx_pending;
2653
2654         if (was_running) {
2655                 int err;
2656
2657                 err = bcm_enetsw_open(dev);
2658                 if (err)
2659                         dev_close(dev);
2660         }
2661         return 0;
2662 }
2663
2664 static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
2665         .get_strings            = bcm_enetsw_get_strings,
2666         .get_sset_count         = bcm_enetsw_get_sset_count,
2667         .get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2668         .get_drvinfo            = bcm_enetsw_get_drvinfo,
2669         .get_ringparam          = bcm_enetsw_get_ringparam,
2670         .set_ringparam          = bcm_enetsw_set_ringparam,
2671 };
2672
2673 /* allocate netdevice, request register memory and register device. */
2674 static int bcm_enetsw_probe(struct platform_device *pdev)
2675 {
2676         struct bcm_enet_priv *priv;
2677         struct net_device *dev;
2678         struct bcm63xx_enetsw_platform_data *pd;
2679         struct resource *res_mem;
2680         int ret, irq_rx, irq_tx;
2681
2682         /* stop if the shared driver failed; assume driver->probe is
2683          * called in the same order the devices were registered
2684          */
2685         if (!bcm_enet_shared_base[0])
2686                 return -ENODEV;
2687
2688         res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2689         irq_rx = platform_get_irq(pdev, 0);
2690         irq_tx = platform_get_irq(pdev, 1);
2691         if (!res_mem || irq_rx < 0)
2692                 return -ENODEV;
2693
2694         ret = 0;
2695         dev = alloc_etherdev(sizeof(*priv));
2696         if (!dev)
2697                 return -ENOMEM;
2698         priv = netdev_priv(dev);
2699         memset(priv, 0, sizeof(*priv));
2700
2701         /* initialize default and fetch platform data */
2702         priv->enet_is_sw = true;
2703         priv->irq_rx = irq_rx;
2704         priv->irq_tx = irq_tx;
2705         priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2706         priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2707         priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2708
2709         pd = pdev->dev.platform_data;
2710         if (pd) {
2711                 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2712                 memcpy(priv->used_ports, pd->used_ports,
2713                        sizeof(pd->used_ports));
2714                 priv->num_ports = pd->num_ports;
2715         }
2716
2717         ret = compute_hw_mtu(priv, dev->mtu);
2718         if (ret)
2719                 goto out;
2720
2721         if (!request_mem_region(res_mem->start, resource_size(res_mem),
2722                                 "bcm63xx_enetsw")) {
2723                 ret = -EBUSY;
2724                 goto out;
2725         }
2726
2727         priv->base = ioremap(res_mem->start, resource_size(res_mem));
2728         if (priv->base == NULL) {
2729                 ret = -ENOMEM;
2730                 goto out_release_mem;
2731         }
2732
2733         priv->mac_clk = clk_get(&pdev->dev, "enetsw");
2734         if (IS_ERR(priv->mac_clk)) {
2735                 ret = PTR_ERR(priv->mac_clk);
2736                 goto out_unmap;
2737         }
2738         clk_prepare_enable(priv->mac_clk);
2739
2740         priv->rx_chan = 0;
2741         priv->tx_chan = 1;
2742         spin_lock_init(&priv->rx_lock);
2743
2744         /* init rx timeout (used to retry rx refill on oom) */
2745         init_timer(&priv->rx_timeout);
2746         priv->rx_timeout.function = bcm_enet_refill_rx_timer;
2747         priv->rx_timeout.data = (unsigned long)dev;
2748
2749         /* register netdevice */
2750         dev->netdev_ops = &bcm_enetsw_ops;
2751         netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2752         SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
2753         SET_NETDEV_DEV(dev, &pdev->dev);
2754
2755         spin_lock_init(&priv->enetsw_mdio_lock);
2756
2757         ret = register_netdev(dev);
2758         if (ret)
2759                 goto out_put_clk;
2760
2761         netif_carrier_off(dev);
2762         platform_set_drvdata(pdev, dev);
2763         priv->pdev = pdev;
2764         priv->net_dev = dev;
2765
2766         return 0;
2767
2768 out_put_clk:
2769         clk_disable_unprepare(priv->mac_clk);
             clk_put(priv->mac_clk);
2770
2771 out_unmap:
2772         iounmap(priv->base);
2773
2774 out_release_mem:
2775         release_mem_region(res_mem->start, resource_size(res_mem));
2776 out:
2777         free_netdev(dev);
2778         return ret;
2779 }
2780
2782 /* exit func, stops hardware and unregisters netdevice */
2783 static int bcm_enetsw_remove(struct platform_device *pdev)
2784 {
2785         struct bcm_enet_priv *priv;
2786         struct net_device *dev;
2787         struct resource *res;
2788
2789         /* stop netdevice */
2790         dev = platform_get_drvdata(pdev);
2791         priv = netdev_priv(dev);
2792         unregister_netdev(dev);
2793
2794         /* release device resources */
2795         iounmap(priv->base);
2796         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2797         release_mem_region(res->start, resource_size(res));
2798
2799         /* release the mac clock taken in probe */
             clk_disable_unprepare(priv->mac_clk);
             clk_put(priv->mac_clk);
             platform_set_drvdata(pdev, NULL);
2800         free_netdev(dev);
2801         return 0;
2802 }
2803
2804 struct platform_driver bcm63xx_enetsw_driver = {
2805         .probe  = bcm_enetsw_probe,
2806         .remove = bcm_enetsw_remove,
2807         .driver = {
2808                 .name   = "bcm63xx_enetsw",
2809                 .owner  = THIS_MODULE,
2810         },
2811 };
2812
2813 /* reserve & remap memory space shared between all macs */
2814 static int bcm_enet_shared_probe(struct platform_device *pdev)
2815 {
2816         struct resource *res;
2817         void __iomem *p[3];
2818         unsigned int i;
2819
2820         memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2821
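             /* the three IORESOURCE_MEM entries are expected to map, in this
              * order, the global DMA config block, the per-channel config
              * block and the channel state RAM (the ENETDMA_, ENETDMAC_ and
              * ENETDMAS_ register groups) */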
2822         for (i = 0; i < 3; i++) {
2823                 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
2824                 p[i] = devm_ioremap_resource(&pdev->dev, res);
2825                 if (IS_ERR(p[i]))
2826                         return PTR_ERR(p[i]);
2827         }
2828
2829         memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2830
2831         return 0;
2832 }
2833
2834 static int bcm_enet_shared_remove(struct platform_device *pdev)
2835 {
2836         return 0;
2837 }
2838
2839 /* this "shared" driver is needed because both macs share a single
2840  * address space
2841  */
2842 struct platform_driver bcm63xx_enet_shared_driver = {
2843         .probe  = bcm_enet_shared_probe,
2844         .remove = bcm_enet_shared_remove,
2845         .driver = {
2846                 .name   = "bcm63xx_enet_shared",
2847                 .owner  = THIS_MODULE,
2848         },
2849 };
2850
2851 /* entry point */
2852 static int __init bcm_enet_init(void)
2853 {
2854         int ret;
2855
2856         ret = platform_driver_register(&bcm63xx_enet_shared_driver);
2857         if (ret)
2858                 return ret;
2859
2860         ret = platform_driver_register(&bcm63xx_enet_driver);
2861         if (ret)
2862                 platform_driver_unregister(&bcm63xx_enet_shared_driver);
2863
2864         ret = platform_driver_register(&bcm63xx_enetsw_driver);
2865         if (ret) {
2866                 platform_driver_unregister(&bcm63xx_enet_driver);
2867                 platform_driver_unregister(&bcm63xx_enet_shared_driver);
2868         }
2869
2870         return ret;
2871 }
2872
2873 static void __exit bcm_enet_exit(void)
2874 {
2875         platform_driver_unregister(&bcm63xx_enet_driver);
2876         platform_driver_unregister(&bcm63xx_enetsw_driver);
2877         platform_driver_unregister(&bcm63xx_enet_shared_driver);
2878 }
2879
2881 module_init(bcm_enet_init);
2882 module_exit(bcm_enet_exit);
2883
2884 MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2885 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2886 MODULE_LICENSE("GPL");