]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/net/ethernet/ibm/emac/core.c
regmap: Use regcache_mark_dirty() to indicate power loss or reset
[linux.git] / drivers / net / ethernet / ibm / emac / core.c
1 /*
2  * drivers/net/ethernet/ibm/emac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
41 #include <linux/of.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/slab.h>
46
47 #include <asm/processor.h>
48 #include <asm/io.h>
49 #include <asm/dma.h>
50 #include <asm/uaccess.h>
51 #include <asm/dcr.h>
52 #include <asm/dcr-regs.h>
53
54 #include "core.h"
55
56 /*
57  * Lack of dma_unmap_???? calls is intentional.
58  *
59  * API-correct usage requires additional support state information to be
60  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
61  * EMAC design (e.g. TX buffer passed from network stack can be split into
62  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
63  * maintaining such information will add additional overhead.
64  * Current DMA API implementation for 4xx processors only ensures cache coherency
65  * and dma_unmap_???? routines are empty and are likely to stay this way.
66  * I decided to omit dma_unmap_??? calls because I don't want to add additional
67  * complexity just for the sake of following some abstract API, when it doesn't
68  * add any real benefit to the driver. I understand that this decision maybe
69  * controversial, but I really tried to make code API-correct and efficient
70  * at the same time and didn't come up with code I liked :(.                --ebs
71  */
72
73 #define DRV_NAME        "emac"
74 #define DRV_VERSION     "3.54"
75 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
76
77 MODULE_DESCRIPTION(DRV_DESC);
78 MODULE_AUTHOR
79     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
80 MODULE_LICENSE("GPL");
81
82 /* minimum number of free TX descriptors required to wake up TX process */
83 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
84
85 /* If packet size is less than this number, we allocate small skb and copy packet
86  * contents into it instead of just sending original big skb up
87  */
88 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
89
90 /* Since multiple EMACs share MDIO lines in various ways, we need
91  * to avoid re-using the same PHY ID in cases where the arch didn't
92  * setup precise phy_map entries
93  *
94  * XXX This is something that needs to be reworked as we can have multiple
95  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96  * probably require in that case to have explicit PHY IDs in the device-tree
97  */
98 static u32 busy_phy_map;
99 static DEFINE_MUTEX(emac_phy_map_lock);
100
101 /* This is the wait queue used to wait on any event related to probe, that
102  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
103  */
104 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
105
106 /* Having stable interface names is a doomed idea. However, it would be nice
107  * if we didn't have completely random interface names at boot too :-) It's
108  * just a matter of making everybody's life easier. Since we are doing
109  * threaded probing, it's a bit harder though. The base idea here is that
110  * we make up a list of all emacs in the device-tree before we register the
111  * driver. Every emac will then wait for the previous one in the list to
112  * initialize before itself. We should also keep that list ordered by
113  * cell_index.
114  * That list is only 4 entries long, meaning that additional EMACs don't
115  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
116  */
117
118 #define EMAC_BOOT_LIST_SIZE     4
119 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
120
121 /* How long should I wait for dependent devices ? */
122 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
123
124 /* I don't want to litter system log with timeout errors
125  * when we have brain-damaged PHY.
126  */
/* Report a timeout error.
 *
 * On chips that carry one of the PHY clock workarounds
 * (440GX/460EX/440EP) these timeouts are expected, so they only go to
 * the debug log; on everything else they are printed to the system
 * log, rate-limited so a brain-damaged PHY cannot flood it.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
			error);
}
138
139 /* EMAC PHY clock workaround:
140  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141  * which allows controlling each EMAC clock
142  */
/* Select the internal clock source for this EMAC instance.
 *
 * Part of the 440EP PHY clock workaround: SDR0_MFR carries one ECS
 * bit per EMAC (SDR0_MFR_ECS shifted right by cell_index), so only
 * this instance is affected.  No-op unless built with native DCR
 * support and the 440EP feature flag is set.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
151
/* Restore the default clock source for this EMAC instance.
 *
 * Counterpart of emac_rx_clk_tx(): clears the per-instance
 * SDR0_MFR_ECS bit that was set there.  No-op unless built with
 * native DCR support and the 440EP feature flag is set.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
160
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON        HZ
163 #define PHY_POLL_LINK_OFF       (HZ / 5)
164
165 /* Graceful stop timeouts in us.
166  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167  */
168 #define STOP_TIMEOUT_10         1230
169 #define STOP_TIMEOUT_100        124
170 #define STOP_TIMEOUT_1000       13
171 #define STOP_TIMEOUT_1000_JUMBO 73
172
/* 01-80-C2-00-00-01: the IEEE 802.3x MAC control (PAUSE) multicast
 * address.  Its consumer is not visible in this chunk of the file.
 */
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
176
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats
 *
 * These strings are exported to userspace through ethtool and are
 * therefore ABI: do NOT "fix" the historical misspelling of
 * "tx_bd_multple_collisions", scripts may already match on it.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
195
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
199
200 static inline int emac_phy_supports_gige(int phy_mode)
201 {
202         return  phy_mode == PHY_MODE_GMII ||
203                 phy_mode == PHY_MODE_RGMII ||
204                 phy_mode == PHY_MODE_SGMII ||
205                 phy_mode == PHY_MODE_TBI ||
206                 phy_mode == PHY_MODE_RTBI;
207 }
208
209 static inline int emac_phy_gpcs(int phy_mode)
210 {
211         return  phy_mode == PHY_MODE_SGMII ||
212                 phy_mode == PHY_MODE_TBI ||
213                 phy_mode == PHY_MODE_RTBI;
214 }
215
/* Enable the transmitter.
 *
 * Read-modify-write of MR0: sets EMAC_MR0_TXE only if it is not
 * already set, preserving all other mode bits.
 */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
227
/* Disable the transmitter and wait for it to go idle.
 *
 * Clears EMAC_MR0_TXE, then polls for the TX idle indication
 * (EMAC_MR0_TXI) for up to dev->stop_timeout microseconds; the
 * timeout is sized elsewhere for one full frame time at the current
 * link speed.  A timeout is reported but otherwise not acted upon.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
247
248 static void emac_rx_enable(struct emac_instance *dev)
249 {
250         struct emac_regs __iomem *p = dev->emacp;
251         u32 r;
252
253         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
254                 goto out;
255
256         DBG(dev, "rx_enable" NL);
257
258         r = in_be32(&p->mr0);
259         if (!(r & EMAC_MR0_RXE)) {
260                 if (unlikely(!(r & EMAC_MR0_RXI))) {
261                         /* Wait if previous async disable is still in progress */
262                         int n = dev->stop_timeout;
263                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
264                                 udelay(1);
265                                 --n;
266                         }
267                         if (unlikely(!n))
268                                 emac_report_timeout_error(dev,
269                                                           "RX disable timeout");
270                 }
271                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
272         }
273  out:
274         ;
275 }
276
/* Disable the receiver and wait for it to go idle.
 *
 * Clears EMAC_MR0_RXE, then polls for the RX idle indication
 * (EMAC_MR0_RXI) for up to dev->stop_timeout microseconds.  A timeout
 * is reported but otherwise not acted upon.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
296
/* Quiesce the network interface around a reconfiguration.
 *
 * Blocks multicast filter updates (no_mcast, set under both the TX
 * and address locks), disables MAL polling, and stops the TX queue.
 * Counterpart of emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;	/* defer __emac_set_multicast_list() */
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
308
/* Resume the network interface after emac_netif_stop().
 *
 * Re-allows multicast updates and applies any update that was
 * deferred while stopped (mcast_pending), wakes the TX queue and
 * re-enables MAL polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
328
/* Disable the receiver without waiting for it to go idle.
 *
 * Just clears EMAC_MR0_RXE; emac_rx_enable() knows how to wait for a
 * disable that is still in progress (RXI not yet set) before
 * re-enabling.
 */
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
340
/* Soft-reset the EMAC core.
 *
 * Stops RX/TX first (40x erratum), handles the 460EX/GT requirement
 * of supplying a TX clock during reset by temporarily selecting the
 * internal clock when no PHY is present, then pulses EMAC_MR0_SRST
 * and polls (up to 20 iterations, no delay) for the bit to
 * self-clear.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset bit never cleared;
 * dev->reset_failed tracks the outcome so a subsequent reset skips
 * the RX/TX stop on known-dead hardware.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
403
/* Program the group address hash table (GAHT) from the netdev's
 * multicast list.
 *
 * Builds the table in a temporary buffer first -- one bit per
 * CRC-derived slot -- then writes all GAHT registers in a single
 * pass so the hardware never sees a half-updated filter.
 *
 * NOTE(review): gaht_temp is a variable-length array sized by
 * EMAC_XAHT_REGS(dev); presumably a handful of registers at most, so
 * stack usage is bounded -- confirm against core.h.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		/* CRC of the address selects a slot; the slot maps to a
		 * (register, bit) pair within the hash table.
		 */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
431
432 static inline u32 emac_iff2rmr(struct net_device *ndev)
433 {
434         struct emac_instance *dev = netdev_priv(ndev);
435         u32 r;
436
437         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
438
439         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
440             r |= EMAC4_RMR_BASE;
441         else
442             r |= EMAC_RMR_BASE;
443
444         if (ndev->flags & IFF_PROMISC)
445                 r |= EMAC_RMR_PME;
446         else if (ndev->flags & IFF_ALLMULTI ||
447                          (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
448                 r |= EMAC_RMR_PMME;
449         else if (!netdev_mc_empty(ndev))
450                 r |= EMAC_RMR_MAE;
451
452         if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
453                 r &= ~EMAC4_RMR_MJS_MASK;
454                 r |= EMAC4_RMR_MJS(ndev->mtu);
455         }
456
457         return r;
458 }
459
460 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
461 {
462         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
463
464         DBG2(dev, "__emac_calc_base_mr1" NL);
465
466         switch(tx_size) {
467         case 2048:
468                 ret |= EMAC_MR1_TFS_2K;
469                 break;
470         default:
471                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
472                        dev->ndev->name, tx_size);
473         }
474
475         switch(rx_size) {
476         case 16384:
477                 ret |= EMAC_MR1_RFS_16K;
478                 break;
479         case 4096:
480                 ret |= EMAC_MR1_RFS_4K;
481                 break;
482         default:
483                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
484                        dev->ndev->name, rx_size);
485         }
486
487         return ret;
488 }
489
490 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
491 {
492         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
493                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
494
495         DBG2(dev, "__emac4_calc_base_mr1" NL);
496
497         switch(tx_size) {
498         case 16384:
499                 ret |= EMAC4_MR1_TFS_16K;
500                 break;
501         case 4096:
502                 ret |= EMAC4_MR1_TFS_4K;
503                 break;
504         case 2048:
505                 ret |= EMAC4_MR1_TFS_2K;
506                 break;
507         default:
508                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
509                        dev->ndev->name, tx_size);
510         }
511
512         switch(rx_size) {
513         case 16384:
514                 ret |= EMAC4_MR1_RFS_16K;
515                 break;
516         case 4096:
517                 ret |= EMAC4_MR1_RFS_4K;
518                 break;
519         case 2048:
520                 ret |= EMAC4_MR1_RFS_2K;
521                 break;
522         default:
523                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
524                        dev->ndev->name, rx_size);
525         }
526
527         return ret;
528 }
529
530 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
531 {
532         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
533                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
534                 __emac_calc_base_mr1(dev, tx_size, rx_size);
535 }
536
537 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
538 {
539         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
540                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
541         else
542                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
543 }
544
545 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
546                                  unsigned int low, unsigned int high)
547 {
548         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
549                 return (low << 22) | ( (high & 0x3ff) << 6);
550         else
551                 return (low << 23) | ( (high & 0x1ff) << 7);
552 }
553
/* Program the EMAC for the current link state.
 *
 * Resets the chip (or forces internal loopback when there is no
 * link), then programs MR1, the individual MAC address, the VLAN
 * TPID, the receive mode register (hashing the multicast list if
 * needed), TX/RX FIFO thresholds and watermarks, the PAUSE timer and
 * the interrupt enable mask, all based on the negotiated
 * speed/duplex/pause parameters in dev->phy.
 *
 * Returns 0 on success or -ETIMEDOUT if the chip reset timed out.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* No carrier: just force full-duplex internal loopback
		 * instead of a full reset.
		 */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
716
/* Stop the interface, reprogram the EMAC, and start it again.
 * RX/TX are only re-enabled if reconfiguration succeeded; the netif
 * is restarted either way.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
728
/* Full TX path recovery: stop the transmitter and its MAL channel,
 * drop everything queued in the TX ring (resetting the ring indices),
 * reconfigure the chip and bring TX/RX back up.  Used from the reset
 * worker after a TX timeout.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
744
/* Workqueue handler scheduled by emac_tx_timeout().
 *
 * Performs the full TX reset under link_lock, and only while the
 * device is actually open -- a close racing with the timeout would
 * otherwise reset torn-down hardware.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
759
/* net_device TX watchdog callback.  Runs in a context where we
 * cannot sleep, so the actual recovery (which takes a mutex) is
 * deferred to the reset workqueue item.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
768
769
770 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
771 {
772         int done = !!(stacr & EMAC_STACR_OC);
773
774         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
775                 done = !done;
776
777         return done;
778 };
779
/* Read PHY register @reg of PHY @id over the EMAC's MDIO interface.
 *
 * Serialized by dev->mdio_lock.  Routes MDIO to this EMAC's port on
 * shared ZMII/RGMII bridges for the duration of the transaction,
 * waits for the interface to go idle, issues the read, then waits
 * for completion.
 *
 * Returns the 16-bit register value (as a non-negative int),
 * -ETIMEDOUT if the interface never became idle / never completed,
 * or -EREMOTEIO if the PHY reported an error (EMAC_STACR_PHYE).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the shared MDIO port and lock on all paths */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
850
/*
 * Perform one write transaction on the EMAC's MDIO management (STA)
 * interface.
 *
 * @dev: EMAC instance whose STACR register drives the MDIO bus
 * @id:  PHY address on the bus
 * @reg: PHY register number to write
 * @val: 16-bit value to write
 *
 * Timeouts are logged via DBG2 but not reported to the caller (the
 * function returns void, mirroring the mii write callback contract).
 * Serialized against concurrent MDIO users by dev->mdio_lock.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	/* Encode PHY register, PHY address and data into the command word */
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release the MDIO port in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
911
912 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
913 {
914         struct emac_instance *dev = netdev_priv(ndev);
915         int res;
916
917         res = __emac_mdio_read((dev->mdio_instance &&
918                                 dev->phy.gpcs_address != id) ?
919                                 dev->mdio_instance : dev,
920                                (u8) id, (u8) reg);
921         return res;
922 }
923
924 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
925 {
926         struct emac_instance *dev = netdev_priv(ndev);
927
928         __emac_mdio_write((dev->mdio_instance &&
929                            dev->phy.gpcs_address != id) ?
930                            dev->mdio_instance : dev,
931                           (u8) id, (u8) reg, (u16) val);
932 }
933
/* Tx lock BH */
/*
 * Program the RX mode register (RMR) from the current netdev flags and
 * multicast list.  Only the RX channel is quiesced around the update;
 * see the comment below for why a full EMAC reset is avoided.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);	/* reprogram the multicast hash filter */
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
966
967 /* Tx lock BH */
968 static void emac_set_multicast_list(struct net_device *ndev)
969 {
970         struct emac_instance *dev = netdev_priv(ndev);
971
972         DBG(dev, "multicast" NL);
973
974         BUG_ON(!netif_running(dev->ndev));
975
976         if (dev->no_mcast) {
977                 dev->mcast_pending = 1;
978                 return;
979         }
980         __emac_set_multicast_list(dev);
981 }
982
/*
 * Rebuild the RX ring for a new MTU.
 *
 * Quiesces the interface and RX channel, drops any in-flight packets,
 * reallocates skbs only when the new MTU requires bigger buffers, and
 * performs a full TX reset if the MR1 "Jumbo" bit has to flip.
 *
 * Returns 0 on success or -ENOMEM if skb allocation fails (in which
 * case the already-replaced ring slots keep their new skbs and RX is
 * restarted regardless, via the shared "oom" exit path).
 *
 * Caller context: process; takes dev->link_lock itself.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* A partially assembled scatter/gather packet cannot survive the
	 * resize; count it as dropped. */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +2 keeps the IP header 16-byte aligned after the 14-byte
		 * ethernet header; the DMA address is offset to match. */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1065
1066 /* Process ctx, rtnl_lock semaphore */
1067 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1068 {
1069         struct emac_instance *dev = netdev_priv(ndev);
1070         int ret = 0;
1071
1072         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1073                 return -EINVAL;
1074
1075         DBG(dev, "change_mtu(%d)" NL, new_mtu);
1076
1077         if (netif_running(ndev)) {
1078                 /* Check if we really need to reinitialize RX ring */
1079                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1080                         ret = emac_resize_rx_ring(dev, new_mtu);
1081         }
1082
1083         if (!ret) {
1084                 ndev->mtu = new_mtu;
1085                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1086                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1087         }
1088
1089         return ret;
1090 }
1091
1092 static void emac_clean_tx_ring(struct emac_instance *dev)
1093 {
1094         int i;
1095
1096         for (i = 0; i < NUM_TX_BUFF; ++i) {
1097                 if (dev->tx_skb[i]) {
1098                         dev_kfree_skb(dev->tx_skb[i]);
1099                         dev->tx_skb[i] = NULL;
1100                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1101                                 ++dev->estats.tx_dropped;
1102                 }
1103                 dev->tx_desc[i].ctrl = 0;
1104                 dev->tx_desc[i].data_ptr = 0;
1105         }
1106 }
1107
1108 static void emac_clean_rx_ring(struct emac_instance *dev)
1109 {
1110         int i;
1111
1112         for (i = 0; i < NUM_RX_BUFF; ++i)
1113                 if (dev->rx_skb[i]) {
1114                         dev->rx_desc[i].ctrl = 0;
1115                         dev_kfree_skb(dev->rx_skb[i]);
1116                         dev->rx_skb[i] = NULL;
1117                         dev->rx_desc[i].data_ptr = 0;
1118                 }
1119
1120         if (dev->rx_sg_skb) {
1121                 dev_kfree_skb(dev->rx_sg_skb);
1122                 dev->rx_sg_skb = NULL;
1123         }
1124 }
1125
/*
 * Allocate and DMA-map a fresh RX skb for ring slot @slot.
 *
 * @dev:   EMAC instance owning the ring
 * @slot:  RX descriptor index to populate
 * @flags: allocation flags (GFP_KERNEL at open time, GFP_ATOMIC in
 *         NAPI context)
 *
 * Returns 0 on success, -ENOMEM on allocation failure.  The +2 offsets
 * keep the IP header 16-byte aligned after the 14-byte ethernet header.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Descriptor fields must be visible before the BD is handed to
	 * the MAL by setting EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1146
1147 static void emac_print_link_status(struct emac_instance *dev)
1148 {
1149         if (netif_carrier_ok(dev->ndev))
1150                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1151                        dev->ndev->name, dev->phy.speed,
1152                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1153                        dev->phy.pause ? ", pause enabled" :
1154                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1155         else
1156                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1157 }
1158
/* Process ctx, rtnl_lock semaphore */
/*
 * ndo_open hook: request the error IRQ, populate the RX ring, start
 * PHY link polling (or assume carrier if there is no PHY), then
 * configure the MAC and enable the MAL channels and queues.
 *
 * Returns 0 on success, a -errno from request_irq(), or -ENOMEM if the
 * RX ring could not be fully allocated.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring bookkeeping */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* Publish link_polling before the work can observe it */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1232
/* BHs disabled */
#if 0
/*
 * Compiled out: compare the speed/duplex/pause configuration currently
 * programmed into MR1 against the cached PHY state; returns non-zero
 * if they differ.  Kept for reference.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1265
/*
 * Delayed-work handler that polls the PHY for link changes.
 *
 * On a down->up transition it rereads the link parameters and performs
 * a full TX reset to reprogram the MAC; on up->down it reinitializes
 * the EMAC.  Reschedules itself with a shorter interval while the link
 * is down.  Bails out early (without rescheduling) once the device is
 * closed (dev->opened cleared under link_lock by emac_close()).
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1306
/*
 * Force the link poller to re-evaluate the link soon: drop carrier,
 * cancel any queued link work, and requeue it with the (short)
 * link-down interval.  The second link_polling test guards against
 * emac_close() having cleared the flag while we waited in
 * cancel_delayed_work_sync().
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();	/* pairs with the wmb() after setting link_polling */
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1317
/* Process ctx, rtnl_lock semaphore */
/*
 * ndo_stop hook: stop link polling, mark the device closed under
 * link_lock (so emac_link_timer() bails), then tear down in reverse
 * order of emac_open(): disable MAC RX/TX, MAL channels and NAPI,
 * free both rings and release the IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1349
1350 static inline u16 emac_tx_csum(struct emac_instance *dev,
1351                                struct sk_buff *skb)
1352 {
1353         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1354                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1355                 ++dev->stats.tx_packets_csum;
1356                 return EMAC_TX_CTRL_TAH_CSUM;
1357         }
1358         return 0;
1359 }
1360
/*
 * Common tail of the xmit paths: kick the transmitter via TMR0, stop
 * the queue when the ring fills up, and update TX statistics.
 *
 * @dev: EMAC instance
 * @len: length of the packet just queued (for byte accounting)
 *
 * Always returns NETDEV_TX_OK; callers have already committed the
 * descriptors.
 */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	/* Ring is full: stop the queue until emac_poll_tx() reaps slots */
	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
1386
/* Tx lock BH */
/*
 * ndo_start_xmit hook for the simple (non scatter/gather) case: map
 * the linear skb data, fill one TX descriptor and hand it to the MAL.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* Descriptor fields must be visible before READY is set in ctrl */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1415
/*
 * Split one DMA-mapped region into a chain of TX descriptors, each at
 * most MAL_MAX_TX_SIZE bytes.
 *
 * @dev:       EMAC instance
 * @slot:      slot *before* the first one to fill (advanced first)
 * @pd:        DMA address of the region
 * @len:       length of the region
 * @last:      non-zero if this region ends the packet (sets
 *             MAL_TX_CTRL_LAST on the final chunk)
 * @base_ctrl: control bits common to every chunk
 *
 * Returns the index of the last slot filled.  Note: the caller is
 * responsible for setting READY on the first descriptor of the packet
 * afterwards; chunks here inherit READY from base_ctrl.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Intermediate chunks own no skb; only the packet's final
		 * slot gets the skb attached (by the caller) */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1444
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/*
 * ndo_start_xmit hook for TAH-equipped EMACs: handles fragmented skbs
 * and packets larger than MAL_MAX_TX_SIZE by chaining descriptors via
 * emac_xmit_split().  Falls back to emac_start_xmit() for the common
 * small, linear case.  Returns NETDEV_TX_OK on success or
 * NETDEV_TX_BUSY (after stopping the queue) when the ring cannot hold
 * the packet.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;	/* linear part only from here on */

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	/* READY deliberately omitted on the first descriptor; it is set
	 * last, below, after the whole chain is built */
	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		/* Re-check capacity per fragment; undo everything if the
		 * initial estimate turns out short */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* Publish all chained descriptors before setting READY on the
	 * first one */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1528
1529 /* Tx lock BHs */
1530 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1531 {
1532         struct emac_error_stats *st = &dev->estats;
1533
1534         DBG(dev, "BD TX error %04x" NL, ctrl);
1535
1536         ++st->tx_bd_errors;
1537         if (ctrl & EMAC_TX_ST_BFCS)
1538                 ++st->tx_bd_bad_fcs;
1539         if (ctrl & EMAC_TX_ST_LCS)
1540                 ++st->tx_bd_carrier_loss;
1541         if (ctrl & EMAC_TX_ST_ED)
1542                 ++st->tx_bd_excessive_deferral;
1543         if (ctrl & EMAC_TX_ST_EC)
1544                 ++st->tx_bd_excessive_collisions;
1545         if (ctrl & EMAC_TX_ST_LC)
1546                 ++st->tx_bd_late_collision;
1547         if (ctrl & EMAC_TX_ST_MC)
1548                 ++st->tx_bd_multple_collisions;
1549         if (ctrl & EMAC_TX_ST_SC)
1550                 ++st->tx_bd_single_collision;
1551         if (ctrl & EMAC_TX_ST_UR)
1552                 ++st->tx_bd_underrun;
1553         if (ctrl & EMAC_TX_ST_SQE)
1554                 ++st->tx_bd_sqe;
1555 }
1556
/*
 * Reap completed TX descriptors (MAL commac poll_tx callback).
 *
 * Walks the ring from ack_slot, freeing skbs for descriptors the
 * hardware has cleared READY on, counting BD errors, and waking the
 * queue once enough slots are free.  Runs under the TX lock with BHs
 * disabled to serialize against the xmit paths.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs report a different set of error bits */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split packets only attach the skb to their last
			 * slot, so intermediate slots may hold NULL */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1602
/*
 * Hand an RX skb back to the hardware without reallocating it: re-map
 * the portion the device wrote (when @len is non-zero) and mark the
 * descriptor EMPTY again.
 *
 * @dev:  EMAC instance
 * @slot: RX descriptor index being recycled
 * @len:  number of payload bytes the device wrote (0 = nothing to sync)
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Descriptor fields must be visible before EMPTY hands the BD
	 * back to the MAL */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1619
1620 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1621 {
1622         struct emac_error_stats *st = &dev->estats;
1623
1624         DBG(dev, "BD RX error %04x" NL, ctrl);
1625
1626         ++st->rx_bd_errors;
1627         if (ctrl & EMAC_RX_ST_OE)
1628                 ++st->rx_bd_overrun;
1629         if (ctrl & EMAC_RX_ST_BP)
1630                 ++st->rx_bd_bad_packet;
1631         if (ctrl & EMAC_RX_ST_RP)
1632                 ++st->rx_bd_runt_packet;
1633         if (ctrl & EMAC_RX_ST_SE)
1634                 ++st->rx_bd_short_event;
1635         if (ctrl & EMAC_RX_ST_AE)
1636                 ++st->rx_bd_alignment_error;
1637         if (ctrl & EMAC_RX_ST_BFCS)
1638                 ++st->rx_bd_bad_fcs;
1639         if (ctrl & EMAC_RX_ST_PTL)
1640                 ++st->rx_bd_packet_too_long;
1641         if (ctrl & EMAC_RX_ST_ORE)
1642                 ++st->rx_bd_out_of_range;
1643         if (ctrl & EMAC_RX_ST_IRE)
1644                 ++st->rx_bd_in_range;
1645 }
1646
1647 static inline void emac_rx_csum(struct emac_instance *dev,
1648                                 struct sk_buff *skb, u16 ctrl)
1649 {
1650 #ifdef CONFIG_IBM_EMAC_TAH
1651         if (!ctrl && dev->tah_dev) {
1652                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1653                 ++dev->stats.rx_packets_csum;
1654         }
1655 #endif
1656 }
1657
/*
 * Append the contents of RX slot @slot to the in-progress
 * scatter/gather packet (dev->rx_sg_skb).
 *
 * Returns 0 when the data was appended, -1 when there was no packet in
 * progress or the combined length would exceed the skb buffer (the
 * partial packet is then dropped and counted as rx_dropped_mtu).  The
 * ring slot is recycled in every case.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		/* +2 accounts for the alignment offset reserved in front
		 * of the payload */
		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1679
/* NAPI poll context */
/*
 * Main RX poll loop.  Walks the RX descriptor ring from dev->rx_slot,
 * handing up to @budget completed frames to the stack.
 *
 * Single-descriptor frames are either copied into a fresh small skb
 * (below EMAC_RX_COPY_THRESH) or passed up directly with a replacement
 * skb allocated for the slot.  Multi-descriptor (scatter-gather) frames
 * are accumulated in dev->rx_sg_skb via emac_rx_sg_append().
 *
 * Returns the number of descriptors processed.
 */
static int emac_poll_rx(void *param, int budget)
{
        struct emac_instance *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                /* Descriptor still owned by the MAL - nothing more to do */
                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                /* Order the ctrl read above against the data_len read below */
                mb();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                /* Drop frames with error bits other than a TAH csum miss */
                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

                if (len < ETH_HLEN) {
                        ++dev->estats.rx_dropped_stack;
                        emac_recycle_rx_skb(dev, slot, len);
                        goto next;
                }

                /* Small frame: copy into a fresh skb and recycle the slot */
                if (len && len < EMAC_RX_COPY_THRESH) {
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        /* -2/+2: include the 2-byte alignment skew area */
                        memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
        push_packet:
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
        next:
                /* NOTE: rx_packets is bumped on error/drop paths too, since
                 * they jump here after accounting the drop separately */
                ++dev->stats.rx_packets;
        skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
        sg:
                /* Scatter-gather frame: FIRST starts a new accumulation,
                 * subsequent descriptors are appended until LAST */
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG(dev, "rx OOM %d" NL, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        /* Error bits are only valid on the LAST descriptor */
                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
        oom:
                DBG(dev, "rx OOM %d" NL, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2(dev, "rx %d BDs" NL, received);
                dev->rx_slot = slot;
        }

        /* If the channel was stopped (RX de-assertion) and we still have
         * budget left, either resume processing or restart the channel */
        if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
                mb();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2(dev, "rx restart" NL);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2(dev, "dropping partial rx packet" NL);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
                mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
                emac_rx_enable(dev);
                /* Channel restart resets the ring to slot 0 */
                dev->rx_slot = 0;
        }
        return received;
}
1809
1810 /* NAPI poll context */
1811 static int emac_peek_rx(void *param)
1812 {
1813         struct emac_instance *dev = param;
1814
1815         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1816 }
1817
1818 /* NAPI poll context */
1819 static int emac_peek_rx_sg(void *param)
1820 {
1821         struct emac_instance *dev = param;
1822
1823         int slot = dev->rx_slot;
1824         while (1) {
1825                 u16 ctrl = dev->rx_desc[slot].ctrl;
1826                 if (ctrl & MAL_RX_CTRL_EMPTY)
1827                         return 0;
1828                 else if (ctrl & MAL_RX_CTRL_LAST)
1829                         return 1;
1830
1831                 slot = (slot + 1) % NUM_RX_BUFF;
1832
1833                 /* I'm just being paranoid here :) */
1834                 if (unlikely(slot == dev->rx_slot))
1835                         return 0;
1836         }
1837 }
1838
1839 /* Hard IRQ */
1840 static void emac_rxde(void *param)
1841 {
1842         struct emac_instance *dev = param;
1843
1844         ++dev->estats.rx_stopped;
1845         emac_rx_disable_async(dev);
1846 }
1847
/* Hard IRQ */
/*
 * EMAC interrupt handler: read and acknowledge the interrupt status
 * register, then account each error cause in the extended statistics.
 * The EMAC4_* bits apply to the EMAC4 core variant; the EMAC_* bits are
 * common.  Writing the read value back to ISR clears the latched bits.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
        struct emac_instance *dev = dev_instance;
        struct emac_regs __iomem *p = dev->emacp;
        struct emac_error_stats *st = &dev->estats;
        u32 isr;

        spin_lock(&dev->lock);

        /* Read and ack in one go; bits are write-one-to-clear */
        isr = in_be32(&p->isr);
        out_be32(&p->isr, isr);

        DBG(dev, "isr = %08x" NL, isr);

        if (isr & EMAC4_ISR_TXPE)
                ++st->tx_parity;
        if (isr & EMAC4_ISR_RXPE)
                ++st->rx_parity;
        if (isr & EMAC4_ISR_TXUE)
                ++st->tx_underrun;
        if (isr & EMAC4_ISR_RXOE)
                ++st->rx_fifo_overrun;
        if (isr & EMAC_ISR_OVR)
                ++st->rx_overrun;
        if (isr & EMAC_ISR_BP)
                ++st->rx_bad_packet;
        if (isr & EMAC_ISR_RP)
                ++st->rx_runt_packet;
        if (isr & EMAC_ISR_SE)
                ++st->rx_short_event;
        if (isr & EMAC_ISR_ALE)
                ++st->rx_alignment_error;
        if (isr & EMAC_ISR_BFCS)
                ++st->rx_bad_fcs;
        if (isr & EMAC_ISR_PTLE)
                ++st->rx_packet_too_long;
        if (isr & EMAC_ISR_ORE)
                ++st->rx_out_of_range;
        if (isr & EMAC_ISR_IRE)
                ++st->rx_in_range;
        if (isr & EMAC_ISR_SQE)
                ++st->tx_sqe;
        if (isr & EMAC_ISR_TE)
                ++st->tx_errors;

        spin_unlock(&dev->lock);

        return IRQ_HANDLED;
}
1898
/*
 * .ndo_get_stats: fold the driver's detailed 64-bit counters (dev->stats
 * and dev->estats) into the "legacy" struct net_device_stats.  The values
 * are snapshotted under dev->lock so the aggregation is consistent with
 * the IRQ-time updates.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        struct emac_stats *st = &dev->stats;
        struct emac_error_stats *est = &dev->estats;
        struct net_device_stats *nst = &dev->nstats;
        unsigned long flags;

        DBG2(dev, "stats" NL);

        /* Compute "legacy" statistics */
        spin_lock_irqsave(&dev->lock, flags);
        nst->rx_packets = (unsigned long)st->rx_packets;
        nst->rx_bytes = (unsigned long)st->rx_bytes;
        nst->tx_packets = (unsigned long)st->tx_packets;
        nst->tx_bytes = (unsigned long)st->tx_bytes;
        nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
                                          est->rx_dropped_error +
                                          est->rx_dropped_resize +
                                          est->rx_dropped_mtu);
        nst->tx_dropped = (unsigned long)est->tx_dropped;

        nst->rx_errors = (unsigned long)est->rx_bd_errors;
        nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
                                              est->rx_fifo_overrun +
                                              est->rx_overrun);
        nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
                                               est->rx_alignment_error);
        nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
                                             est->rx_bad_fcs);
        nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
                                                est->rx_bd_short_event +
                                                est->rx_bd_packet_too_long +
                                                est->rx_bd_out_of_range +
                                                est->rx_bd_in_range +
                                                est->rx_runt_packet +
                                                est->rx_short_event +
                                                est->rx_packet_too_long +
                                                est->rx_out_of_range +
                                                est->rx_in_range);

        nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
        nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
                                              est->tx_underrun);
        nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
        nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
                                          est->tx_bd_excessive_collisions +
                                          est->tx_bd_late_collision +
                                          est->tx_bd_multple_collisions);
        spin_unlock_irqrestore(&dev->lock, flags);
        return nst;
}
1951
/* MAL callbacks for the normal (single-descriptor) RX path */
static struct mal_commac_ops emac_commac_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx,
        .rxde = &emac_rxde,
};

/* MAL callbacks when RX scatter-gather is in use: peek_rx must only
 * report work once a complete multi-descriptor frame is available */
static struct mal_commac_ops emac_commac_sg_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx_sg,
        .rxde = &emac_rxde,
};
1965
/* Ethtool support */
/*
 * Report the current PHY capabilities and link parameters.  The static
 * capability fields are filled without locking; the negotiated link
 * parameters are read under link_lock since they are updated elsewhere
 * under the same mutex.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct emac_instance *dev = netdev_priv(ndev);

        cmd->supported = dev->phy.features;
        cmd->port = PORT_MII;
        cmd->phy_address = dev->phy.address;
        /* A negative PHY address means PHY-less / internal operation */
        cmd->transceiver =
            dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

        mutex_lock(&dev->link_lock);
        cmd->advertising = dev->phy.advertising;
        cmd->autoneg = dev->phy.autoneg;
        cmd->speed = dev->phy.speed;
        cmd->duplex = dev->phy.duplex;
        mutex_unlock(&dev->link_lock);

        return 0;
}
1987
/*
 * Apply new link settings from ethtool.  In forced mode the requested
 * speed/duplex must be in the PHY's feature set; in autoneg mode the
 * advertised modes are intersected with the features and the current
 * pause advertisement is preserved.  Returns -EOPNOTSUPP for PHY-less
 * configurations and -EINVAL for inconsistent requests.
 */
static int emac_ethtool_set_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct emac_instance *dev = netdev_priv(ndev);
        u32 f = dev->phy.features;

        DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
            cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

        /* Basic sanity checks */
        if (dev->phy.address < 0)
                return -EOPNOTSUPP;
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;
        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;
        if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE) {
                /* Forced mode: reject speed/duplex pairs the PHY can't do */
                switch (cmd->speed) {
                case SPEED_10:
                        if (cmd->duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_10baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_10baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_100:
                        if (cmd->duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_100baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_100baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_1000:
                        if (cmd->duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_1000baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_1000baseT_Full))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }

                mutex_lock(&dev->link_lock);
                dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
                                                cmd->duplex);
                mutex_unlock(&dev->link_lock);

        } else {
                if (!(f & SUPPORTED_Autoneg))
                        return -EINVAL;

                mutex_lock(&dev->link_lock);
                /* Advertise only supported modes, keeping the current
                 * pause/asym-pause advertisement bits */
                dev->phy.def->ops->setup_aneg(&dev->phy,
                                              (cmd->advertising & f) |
                                              (dev->phy.advertising &
                                               (ADVERTISED_Pause |
                                                ADVERTISED_Asym_Pause)));
                mutex_unlock(&dev->link_lock);
        }
        emac_force_link_update(dev);

        return 0;
}
2058
2059 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2060                                        struct ethtool_ringparam *rp)
2061 {
2062         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2063         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2064 }
2065
/*
 * Report flow-control state derived from the PHY: autoneg is on when the
 * PHY advertises any pause mode; actual rx/tx pause is only meaningful in
 * full duplex.  Read under link_lock for a consistent snapshot.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
                                        struct ethtool_pauseparam *pp)
{
        struct emac_instance *dev = netdev_priv(ndev);

        mutex_lock(&dev->link_lock);
        if ((dev->phy.features & SUPPORTED_Autoneg) &&
            (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
                pp->autoneg = 1;

        if (dev->phy.duplex == DUPLEX_FULL) {
                /* Symmetric pause enables both directions; asymmetric
                 * pause only enables TX pause frames */
                if (dev->phy.pause)
                        pp->rx_pause = pp->tx_pause = 1;
                else if (dev->phy.asym_pause)
                        pp->tx_pause = 1;
        }
        mutex_unlock(&dev->link_lock);
}
2084
2085 static int emac_get_regs_len(struct emac_instance *dev)
2086 {
2087         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2088                 return sizeof(struct emac_ethtool_regs_subhdr) +
2089                         EMAC4_ETHTOOL_REGS_SIZE(dev);
2090         else
2091                 return sizeof(struct emac_ethtool_regs_subhdr) +
2092                         EMAC_ETHTOOL_REGS_SIZE(dev);
2093 }
2094
2095 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2096 {
2097         struct emac_instance *dev = netdev_priv(ndev);
2098         int size;
2099
2100         size = sizeof(struct emac_ethtool_regs_hdr) +
2101                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2102         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2103                 size += zmii_get_regs_len(dev->zmii_dev);
2104         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2105                 size += rgmii_get_regs_len(dev->rgmii_dev);
2106         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2107                 size += tah_get_regs_len(dev->tah_dev);
2108
2109         return size;
2110 }
2111
/*
 * Write this EMAC's register dump (sub-header followed by a raw copy of
 * the MMIO register block) into @buf and return a pointer just past the
 * written data, so the caller can chain further dumps.
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;

        hdr->index = dev->cell_index;
        if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
                hdr->version = EMAC4_ETHTOOL_REGS_VER;
                memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
                return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
        } else {
                hdr->version = EMAC_ETHTOOL_REGS_VER;
                memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
                return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
        }
}
2127
/*
 * Fill the ethtool register dump: MAL registers first, then the EMAC
 * block, then any companion blocks, recording which components are
 * present in hdr->components.  The layout must match the sizes reported
 * by emac_ethtool_get_regs_len().
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
                                  struct ethtool_regs *regs, void *buf)
{
        struct emac_instance *dev = netdev_priv(ndev);
        struct emac_ethtool_regs_hdr *hdr = buf;

        hdr->components = 0;
        buf = hdr + 1;

        buf = mal_dump_regs(dev->mal, buf);
        buf = emac_dump_regs(dev, buf);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
                hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
                buf = zmii_dump_regs(dev->zmii_dev, buf);
        }
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
                hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
                buf = rgmii_dump_regs(dev->rgmii_dev, buf);
        }
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
                hdr->components |= EMAC_ETHTOOL_REGS_TAH;
                buf = tah_dump_regs(dev->tah_dev, buf);
        }
}
2152
/*
 * Restart autonegotiation by re-issuing the current advertisement.
 * Fails with -EOPNOTSUPP in PHY-less mode and -EINVAL when autoneg is
 * not currently enabled.
 */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int res = 0;

        DBG(dev, "nway_reset" NL);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        mutex_lock(&dev->link_lock);
        if (!dev->phy.autoneg) {
                res = -EINVAL;
                goto out;
        }

        dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
        mutex_unlock(&dev->link_lock);
        /* Kick the link timer even on failure paths after the lock drop */
        emac_force_link_update(dev);
        return res;
}
2175
2176 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2177 {
2178         if (stringset == ETH_SS_STATS)
2179                 return EMAC_ETHTOOL_STATS_COUNT;
2180         else
2181                 return -EINVAL;
2182 }
2183
2184 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2185                                      u8 * buf)
2186 {
2187         if (stringset == ETH_SS_STATS)
2188                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2189 }
2190
2191 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2192                                            struct ethtool_stats *estats,
2193                                            u64 * tmp_stats)
2194 {
2195         struct emac_instance *dev = netdev_priv(ndev);
2196
2197         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2198         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2199         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2200 }
2201
/*
 * Fill the ethtool driver-info block, encoding the cell index and device
 * tree path in bus_info since EMAC instances are not PCI devices.
 */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
                                     struct ethtool_drvinfo *info)
{
        struct emac_instance *dev = netdev_priv(ndev);

        strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
                 dev->cell_index, dev->ofdev->dev.of_node->full_name);
        info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
2213
/* ethtool entry points for the EMAC driver */
static const struct ethtool_ops emac_ethtool_ops = {
        .get_settings = emac_ethtool_get_settings,
        .set_settings = emac_ethtool_set_settings,
        .get_drvinfo = emac_ethtool_get_drvinfo,

        .get_regs_len = emac_ethtool_get_regs_len,
        .get_regs = emac_ethtool_get_regs,

        .nway_reset = emac_ethtool_nway_reset,

        .get_ringparam = emac_ethtool_get_ringparam,
        .get_pauseparam = emac_ethtool_get_pauseparam,

        .get_strings = emac_ethtool_get_strings,
        .get_sset_count = emac_ethtool_get_sset_count,
        .get_ethtool_stats = emac_ethtool_get_ethtool_stats,

        .get_link = ethtool_op_get_link,
};
2233
/*
 * MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG) for the
 * attached PHY.  Returns -EOPNOTSUPP in PHY-less mode.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
        struct emac_instance *dev = netdev_priv(ndev);
        struct mii_ioctl_data *data = if_mii(rq);

        DBG(dev, "ioctl %08x" NL, cmd);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = dev->phy.address;
                /* Fall through: GMIIPHY also returns the register read,
                 * matching the generic MII ioctl convention */
        case SIOCGMIIREG:
                data->val_out = emac_mdio_read(ndev, dev->phy.address,
                                               data->reg_num);
                return 0;

        case SIOCSMIIREG:
                emac_mdio_write(ndev, dev->phy.address, data->reg_num,
                                data->val_in);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
2261
/* One probe-time dependency: the phandle from the device tree, the
 * resolved node, its platform device, and the driver data whose
 * presence proves the dependency's driver has bound. */
struct emac_depentry {
        u32                     phandle;
        struct device_node      *node;
        struct platform_device  *ofdev;
        void                    *drvdata;
};
2268
/* Indices into the dependency array used by emac_check_deps() /
 * emac_wait_deps().  PREV is the previous EMAC in the boot list, used
 * only to serialize probe order of chained EMACs. */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5
#define EMAC_DEP_COUNT          6
2276
/*
 * Try to resolve every dependency in @deps: phandle -> device node ->
 * platform device -> driver data.  Each stage is cached in the entry so
 * repeated calls (from the wait_event loop) only retry the missing
 * stages.  Returns non-zero once ALL dependencies are satisfied.
 */
static int emac_check_deps(struct emac_instance *dev,
                           struct emac_depentry *deps)
{
        int i, there = 0;
        struct device_node *np;

        for (i = 0; i < EMAC_DEP_COUNT; i++) {
                /* no dependency on that item, allright */
                if (deps[i].phandle == 0) {
                        there++;
                        continue;
                }
                /* special case for blist as the dependency might go away */
                if (i == EMAC_DEP_PREV_IDX) {
                        np = *(dev->blist - 1);
                        if (np == NULL) {
                                deps[i].phandle = 0;
                                there++;
                                continue;
                        }
                        if (deps[i].node == NULL)
                                deps[i].node = of_node_get(np);
                }
                if (deps[i].node == NULL)
                        deps[i].node = of_find_node_by_phandle(deps[i].phandle);
                if (deps[i].node == NULL)
                        continue;
                if (deps[i].ofdev == NULL)
                        deps[i].ofdev = of_find_device_by_node(deps[i].node);
                if (deps[i].ofdev == NULL)
                        continue;
                if (deps[i].drvdata == NULL)
                        deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
                if (deps[i].drvdata != NULL)
                        there++;
        }
        return there == EMAC_DEP_COUNT;
}
2315
/*
 * Drop the platform-device references taken by emac_wait_deps().
 * of_dev_put() tolerates NULL, so absent dependencies are harmless.
 */
static void emac_put_deps(struct emac_instance *dev)
{
        of_dev_put(dev->mal_dev);
        of_dev_put(dev->zmii_dev);
        of_dev_put(dev->rgmii_dev);
        of_dev_put(dev->mdio_dev);
        of_dev_put(dev->tah_dev);
}
2324
/*
 * Platform bus notifier: any driver binding may satisfy one of our
 * pending dependencies, so wake every probe waiting in emac_wait_deps().
 */
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
                              void *data)
{
        /* We are only interested in devices getting bound to a driver */
        if (action == BUS_NOTIFY_BOUND_DRIVER)
                wake_up_all(&emac_probe_wait);
        return 0;
}
2333
/* Registered on the platform bus only for the duration of emac_wait_deps() */
static struct notifier_block emac_of_bus_notifier = {
        .notifier_call = emac_of_bus_notify
};
2337
2338 static int emac_wait_deps(struct emac_instance *dev)
2339 {
2340         struct emac_depentry deps[EMAC_DEP_COUNT];
2341         int i, err;
2342
2343         memset(&deps, 0, sizeof(deps));
2344
2345         deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2346         deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2347         deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2348         if (dev->tah_ph)
2349                 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2350         if (dev->mdio_ph)
2351                 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2352         if (dev->blist && dev->blist > emac_boot_list)
2353                 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2354         bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2355         wait_event_timeout(emac_probe_wait,
2356                            emac_check_deps(dev, deps),
2357                            EMAC_PROBE_DEP_TIMEOUT);
2358         bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2359         err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2360         for (i = 0; i < EMAC_DEP_COUNT; i++) {
2361                 of_node_put(deps[i].node);
2362                 if (err)
2363                         of_dev_put(deps[i].ofdev);
2364         }
2365         if (err == 0) {
2366                 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2367                 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2368                 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2369                 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2370                 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2371         }
2372         of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2373         return err;
2374 }
2375
/*
 * Read a 32-bit device-tree property @name from @np into *val.
 * Returns 0 on success, -ENODEV when the property is missing or too
 * short; when @fatal is set, the failure is also logged.  *val is left
 * untouched on failure.
 */
static int emac_read_uint_prop(struct device_node *np, const char *name,
                               u32 *val, int fatal)
{
        int len;
        const u32 *prop = of_get_property(np, name, &len);
        if (prop == NULL || len < sizeof(u32)) {
                if (fatal)
                        printk(KERN_ERR "%s: missing %s property\n",
                               np->full_name, name);
                return -ENODEV;
        }
        *val = *prop;
        return 0;
}
2390
2391 static int emac_init_phy(struct emac_instance *dev)
2392 {
2393         struct device_node *np = dev->ofdev->dev.of_node;
2394         struct net_device *ndev = dev->ndev;
2395         u32 phy_map, adv;
2396         int i;
2397
2398         dev->phy.dev = ndev;
2399         dev->phy.mode = dev->phy_mode;
2400
2401         /* PHY-less configuration.
2402          * XXX I probably should move these settings to the dev tree
2403          */
2404         if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2405                 emac_reset(dev);
2406
2407                 /* PHY-less configuration.
2408                  * XXX I probably should move these settings to the dev tree
2409                  */
2410                 dev->phy.address = -1;
2411                 dev->phy.features = SUPPORTED_MII;
2412                 if (emac_phy_supports_gige(dev->phy_mode))
2413                         dev->phy.features |= SUPPORTED_1000baseT_Full;
2414                 else
2415                         dev->phy.features |= SUPPORTED_100baseT_Full;
2416                 dev->phy.pause = 1;
2417
2418                 return 0;
2419         }
2420
2421         mutex_lock(&emac_phy_map_lock);
2422         phy_map = dev->phy_map | busy_phy_map;
2423
2424         DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2425
2426         dev->phy.mdio_read = emac_mdio_read;
2427         dev->phy.mdio_write = emac_mdio_write;
2428
2429         /* Enable internal clock source */
2430 #ifdef CONFIG_PPC_DCR_NATIVE
2431         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2432                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2433 #endif
2434         /* PHY clock workaround */
2435         emac_rx_clk_tx(dev);
2436
2437         /* Enable internal clock source on 440GX*/
2438 #ifdef CONFIG_PPC_DCR_NATIVE
2439         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2440                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2441 #endif
2442         /* Configure EMAC with defaults so we can at least use MDIO
2443          * This is needed mostly for 440GX
2444          */
2445         if (emac_phy_gpcs(dev->phy.mode)) {
2446                 /* XXX
2447                  * Make GPCS PHY address equal to EMAC index.
2448                  * We probably should take into account busy_phy_map
2449                  * and/or phy_map here.
2450                  *
2451                  * Note that the busy_phy_map is currently global
2452                  * while it should probably be per-ASIC...
2453                  */
2454                 dev->phy.gpcs_address = dev->gpcs_address;
2455                 if (dev->phy.gpcs_address == 0xffffffff)
2456                         dev->phy.address = dev->cell_index;
2457         }
2458
2459         emac_configure(dev);
2460
2461         if (dev->phy_address != 0xffffffff)
2462                 phy_map = ~(1 << dev->phy_address);
2463
2464         for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2465                 if (!(phy_map & 1)) {
2466                         int r;
2467                         busy_phy_map |= 1 << i;
2468
2469                         /* Quick check if there is a PHY at the address */
2470                         r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2471                         if (r == 0xffff || r < 0)
2472                                 continue;
2473                         if (!emac_mii_phy_probe(&dev->phy, i))
2474                                 break;
2475                 }
2476
2477         /* Enable external clock source */
2478 #ifdef CONFIG_PPC_DCR_NATIVE
2479         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2480                 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2481 #endif
2482         mutex_unlock(&emac_phy_map_lock);
2483         if (i == 0x20) {
2484                 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2485                 return -ENXIO;
2486         }
2487
2488         /* Init PHY */
2489         if (dev->phy.def->ops->init)
2490                 dev->phy.def->ops->init(&dev->phy);
2491
2492         /* Disable any PHY features not supported by the platform */
2493         dev->phy.def->features &= ~dev->phy_feat_exc;
2494         dev->phy.features &= ~dev->phy_feat_exc;
2495
2496         /* Setup initial link parameters */
2497         if (dev->phy.features & SUPPORTED_Autoneg) {
2498                 adv = dev->phy.features;
2499                 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2500                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2501                 /* Restart autonegotiation */
2502                 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2503         } else {
2504                 u32 f = dev->phy.def->features;
2505                 int speed = SPEED_10, fd = DUPLEX_HALF;
2506
2507                 /* Select highest supported speed/duplex */
2508                 if (f & SUPPORTED_1000baseT_Full) {
2509                         speed = SPEED_1000;
2510                         fd = DUPLEX_FULL;
2511                 } else if (f & SUPPORTED_1000baseT_Half)
2512                         speed = SPEED_1000;
2513                 else if (f & SUPPORTED_100baseT_Full) {
2514                         speed = SPEED_100;
2515                         fd = DUPLEX_FULL;
2516                 } else if (f & SUPPORTED_100baseT_Half)
2517                         speed = SPEED_100;
2518                 else if (f & SUPPORTED_10baseT_Full)
2519                         fd = DUPLEX_FULL;
2520
2521                 /* Force link parameters */
2522                 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2523         }
2524         return 0;
2525 }
2526
/*
 * Parse this EMAC's configuration out of its device-tree node and fill
 * in the corresponding fields of @dev.
 *
 * Properties read with a '1' as the last emac_read_uint_prop() argument
 * are mandatory: a missing one makes the probe fail with -ENXIO.  The
 * optional ones fall back to the defaults assigned below.
 *
 * Returns 0 on success, -ENXIO on a missing mandatory property or an
 * unsupported/mis-configured combination.
 */
static int emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	/* Gige FIFO sizes default to the 10/100 values when absent */
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	/* 0xffffffff is used throughout the driver as "not specified" for
	 * PHY addressing */
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	/* OPB bus frequency lives on the parent (bus) node */
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	/* Companion devices: phandle 0 means "not present" */
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = of_get_phy_mode(np);
	if (dev->phy_mode < 0)
		dev->phy_mode = PHY_MODE_NA;

	/* Check EMAC version: derive feature bits from the compatible
	 * strings, from newest (emac4sync) to oldest (plain emac) */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
					  EMAC_FTR_460EX_PHY_CLK_FIX);
		}
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			/* 405EZ requires flow control to be disabled; without
			 * the config option we cannot drive it correctly */
			printk(KERN_ERR "%s: Flow control not disabled!\n",
					np->full_name);
			return -ENXIO;
#endif
		}

	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found; each one is an error
	 * if the device tree references it but support is compiled out */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);

	/* IAHT and GAHT filter parameterization: the emac4sync variant
	 * has differently sized hash tables */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
2685
2686 static const struct net_device_ops emac_netdev_ops = {
2687         .ndo_open               = emac_open,
2688         .ndo_stop               = emac_close,
2689         .ndo_get_stats          = emac_stats,
2690         .ndo_set_rx_mode        = emac_set_multicast_list,
2691         .ndo_do_ioctl           = emac_ioctl,
2692         .ndo_tx_timeout         = emac_tx_timeout,
2693         .ndo_validate_addr      = eth_validate_addr,
2694         .ndo_set_mac_address    = eth_mac_addr,
2695         .ndo_start_xmit         = emac_start_xmit,
2696         .ndo_change_mtu         = eth_change_mtu,
2697 };
2698
2699 static const struct net_device_ops emac_gige_netdev_ops = {
2700         .ndo_open               = emac_open,
2701         .ndo_stop               = emac_close,
2702         .ndo_get_stats          = emac_stats,
2703         .ndo_set_rx_mode        = emac_set_multicast_list,
2704         .ndo_do_ioctl           = emac_ioctl,
2705         .ndo_tx_timeout         = emac_tx_timeout,
2706         .ndo_validate_addr      = eth_validate_addr,
2707         .ndo_set_mac_address    = eth_mac_addr,
2708         .ndo_start_xmit         = emac_start_xmit_sg,
2709         .ndo_change_mtu         = emac_change_mtu,
2710 };
2711
2712 static int emac_probe(struct platform_device *ofdev)
2713 {
2714         struct net_device *ndev;
2715         struct emac_instance *dev;
2716         struct device_node *np = ofdev->dev.of_node;
2717         struct device_node **blist = NULL;
2718         int err, i;
2719
2720         /* Skip unused/unwired EMACS.  We leave the check for an unused
2721          * property here for now, but new flat device trees should set a
2722          * status property to "disabled" instead.
2723          */
2724         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2725                 return -ENODEV;
2726
2727         /* Find ourselves in the bootlist if we are there */
2728         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2729                 if (emac_boot_list[i] == np)
2730                         blist = &emac_boot_list[i];
2731
2732         /* Allocate our net_device structure */
2733         err = -ENOMEM;
2734         ndev = alloc_etherdev(sizeof(struct emac_instance));
2735         if (!ndev)
2736                 goto err_gone;
2737
2738         dev = netdev_priv(ndev);
2739         dev->ndev = ndev;
2740         dev->ofdev = ofdev;
2741         dev->blist = blist;
2742         SET_NETDEV_DEV(ndev, &ofdev->dev);
2743
2744         /* Initialize some embedded data structures */
2745         mutex_init(&dev->mdio_lock);
2746         mutex_init(&dev->link_lock);
2747         spin_lock_init(&dev->lock);
2748         INIT_WORK(&dev->reset_work, emac_reset_work);
2749
2750         /* Init various config data based on device-tree */
2751         err = emac_init_config(dev);
2752         if (err != 0)
2753                 goto err_free;
2754
2755         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2756         dev->emac_irq = irq_of_parse_and_map(np, 0);
2757         dev->wol_irq = irq_of_parse_and_map(np, 1);
2758         if (dev->emac_irq == NO_IRQ) {
2759                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2760                 goto err_free;
2761         }
2762         ndev->irq = dev->emac_irq;
2763
2764         /* Map EMAC regs */
2765         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2766                 printk(KERN_ERR "%s: Can't get registers address\n",
2767                        np->full_name);
2768                 goto err_irq_unmap;
2769         }
2770         // TODO : request_mem_region
2771         dev->emacp = ioremap(dev->rsrc_regs.start,
2772                              resource_size(&dev->rsrc_regs));
2773         if (dev->emacp == NULL) {
2774                 printk(KERN_ERR "%s: Can't map device registers!\n",
2775                        np->full_name);
2776                 err = -ENOMEM;
2777                 goto err_irq_unmap;
2778         }
2779
2780         /* Wait for dependent devices */
2781         err = emac_wait_deps(dev);
2782         if (err) {
2783                 printk(KERN_ERR
2784                        "%s: Timeout waiting for dependent devices\n",
2785                        np->full_name);
2786                 /*  display more info about what's missing ? */
2787                 goto err_reg_unmap;
2788         }
2789         dev->mal = platform_get_drvdata(dev->mal_dev);
2790         if (dev->mdio_dev != NULL)
2791                 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
2792
2793         /* Register with MAL */
2794         dev->commac.ops = &emac_commac_ops;
2795         dev->commac.dev = dev;
2796         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2797         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2798         err = mal_register_commac(dev->mal, &dev->commac);
2799         if (err) {
2800                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2801                        np->full_name, dev->mal_dev->dev.of_node->full_name);
2802                 goto err_rel_deps;
2803         }
2804         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2805         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2806
2807         /* Get pointers to BD rings */
2808         dev->tx_desc =
2809             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2810         dev->rx_desc =
2811             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2812
2813         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2814         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2815
2816         /* Clean rings */
2817         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2818         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2819         memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2820         memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2821
2822         /* Attach to ZMII, if needed */
2823         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2824             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2825                 goto err_unreg_commac;
2826
2827         /* Attach to RGMII, if needed */
2828         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2829             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2830                 goto err_detach_zmii;
2831
2832         /* Attach to TAH, if needed */
2833         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2834             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2835                 goto err_detach_rgmii;
2836
2837         /* Set some link defaults before we can find out real parameters */
2838         dev->phy.speed = SPEED_100;
2839         dev->phy.duplex = DUPLEX_FULL;
2840         dev->phy.autoneg = AUTONEG_DISABLE;
2841         dev->phy.pause = dev->phy.asym_pause = 0;
2842         dev->stop_timeout = STOP_TIMEOUT_100;
2843         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2844
2845         /* Some SoCs like APM821xx does not support Half Duplex mode. */
2846         if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
2847                 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
2848                                      SUPPORTED_100baseT_Half |
2849                                      SUPPORTED_10baseT_Half);
2850         }
2851
2852         /* Find PHY if any */
2853         err = emac_init_phy(dev);
2854         if (err != 0)
2855                 goto err_detach_tah;
2856
2857         if (dev->tah_dev) {
2858                 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2859                 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2860         }
2861         ndev->watchdog_timeo = 5 * HZ;
2862         if (emac_phy_supports_gige(dev->phy_mode)) {
2863                 ndev->netdev_ops = &emac_gige_netdev_ops;
2864                 dev->commac.ops = &emac_commac_sg_ops;
2865         } else
2866                 ndev->netdev_ops = &emac_netdev_ops;
2867         ndev->ethtool_ops = &emac_ethtool_ops;
2868
2869         netif_carrier_off(ndev);
2870
2871         err = register_netdev(ndev);
2872         if (err) {
2873                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2874                        np->full_name, err);
2875                 goto err_detach_tah;
2876         }
2877
2878         /* Set our drvdata last as we don't want them visible until we are
2879          * fully initialized
2880          */
2881         wmb();
2882         platform_set_drvdata(ofdev, dev);
2883
2884         /* There's a new kid in town ! Let's tell everybody */
2885         wake_up_all(&emac_probe_wait);
2886
2887
2888         printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2889                ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2890
2891         if (dev->phy_mode == PHY_MODE_SGMII)
2892                 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2893
2894         if (dev->phy.address >= 0)
2895                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2896                        dev->phy.def->name, dev->phy.address);
2897
2898         emac_dbg_register(dev);
2899
2900         /* Life is good */
2901         return 0;
2902
2903         /* I have a bad feeling about this ... */
2904
2905  err_detach_tah:
2906         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2907                 tah_detach(dev->tah_dev, dev->tah_port);
2908  err_detach_rgmii:
2909         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2910                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2911  err_detach_zmii:
2912         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2913                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2914  err_unreg_commac:
2915         mal_unregister_commac(dev->mal, &dev->commac);
2916  err_rel_deps:
2917         emac_put_deps(dev);
2918  err_reg_unmap:
2919         iounmap(dev->emacp);
2920  err_irq_unmap:
2921         if (dev->wol_irq != NO_IRQ)
2922                 irq_dispose_mapping(dev->wol_irq);
2923         if (dev->emac_irq != NO_IRQ)
2924                 irq_dispose_mapping(dev->emac_irq);
2925  err_free:
2926         free_netdev(ndev);
2927  err_gone:
2928         /* if we were on the bootlist, remove us as we won't show up and
2929          * wake up all waiters to notify them in case they were waiting
2930          * on us
2931          */
2932         if (blist) {
2933                 *blist = NULL;
2934                 wake_up_all(&emac_probe_wait);
2935         }
2936         return err;
2937 }
2938
/*
 * Tear down one EMAC instance.  Mirrors emac_probe() in reverse:
 * unregister the net_device, cancel any pending reset work, detach
 * from TAH/RGMII/ZMII, release this PHY's slot in the busy map,
 * unregister from MAL, drop dependent-device references, unmap the
 * registers and dispose of the irq mappings before freeing the netdev.
 */
static int emac_remove(struct platform_device *ofdev)
{
	struct emac_instance *dev = platform_get_drvdata(ofdev);

	DBG(dev, "remove" NL);

	/* No more traffic/ioctls past this point */
	unregister_netdev(dev->ndev);

	/* Make sure a queued reset can't run against a dead device */
	cancel_work_sync(&dev->reset_work);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	/* Free this PHY address for a future probe to reuse */
	busy_phy_map &= ~(1 << dev->phy.address);
	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}
2974
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table covering the three EMAC hardware generations;
 * emac_init_config() refines feature bits from the same compatibles. */
static const struct of_device_id emac_match[] =
{
	{
		.type           = "network",
		.compatible     = "ibm,emac",
	},
	{
		.type           = "network",
		.compatible     = "ibm,emac4",
	},
	{
		.type           = "network",
		.compatible     = "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
2993
/* Platform driver glue, matched against the of_device_id table above */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3002
3003 static void __init emac_make_bootlist(void)
3004 {
3005         struct device_node *np = NULL;
3006         int j, max, i = 0, k;
3007         int cell_indices[EMAC_BOOT_LIST_SIZE];
3008
3009         /* Collect EMACs */
3010         while((np = of_find_all_nodes(np)) != NULL) {
3011                 const u32 *idx;
3012
3013                 if (of_match_node(emac_match, np) == NULL)
3014                         continue;
3015                 if (of_get_property(np, "unused", NULL))
3016                         continue;
3017                 idx = of_get_property(np, "cell-index", NULL);
3018                 if (idx == NULL)
3019                         continue;
3020                 cell_indices[i] = *idx;
3021                 emac_boot_list[i++] = of_node_get(np);
3022                 if (i >= EMAC_BOOT_LIST_SIZE) {
3023                         of_node_put(np);
3024                         break;
3025                 }
3026         }
3027         max = i;
3028
3029         /* Bubble sort them (doh, what a creative algorithm :-) */
3030         for (i = 0; max > 1 && (i < (max - 1)); i++)
3031                 for (j = i; j < max; j++) {
3032                         if (cell_indices[i] > cell_indices[j]) {
3033                                 np = emac_boot_list[i];
3034                                 emac_boot_list[i] = emac_boot_list[j];
3035                                 emac_boot_list[j] = np;
3036                                 k = cell_indices[i];
3037                                 cell_indices[i] = cell_indices[j];
3038                                 cell_indices[j] = k;
3039                         }
3040                 }
3041 }
3042
3043 static int __init emac_init(void)
3044 {
3045         int rc;
3046
3047         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3048
3049         /* Init debug stuff */
3050         emac_init_debug();
3051
3052         /* Build EMAC boot list */
3053         emac_make_bootlist();
3054
3055         /* Init submodules */
3056         rc = mal_init();
3057         if (rc)
3058                 goto err;
3059         rc = zmii_init();
3060         if (rc)
3061                 goto err_mal;
3062         rc = rgmii_init();
3063         if (rc)
3064                 goto err_zmii;
3065         rc = tah_init();
3066         if (rc)
3067                 goto err_rgmii;
3068         rc = platform_driver_register(&emac_driver);
3069         if (rc)
3070                 goto err_tah;
3071
3072         return 0;
3073
3074  err_tah:
3075         tah_exit();
3076  err_rgmii:
3077         rgmii_exit();
3078  err_zmii:
3079         zmii_exit();
3080  err_mal:
3081         mal_exit();
3082  err:
3083         return rc;
3084 }
3085
3086 static void __exit emac_exit(void)
3087 {
3088         int i;
3089
3090         platform_driver_unregister(&emac_driver);
3091
3092         tah_exit();
3093         rgmii_exit();
3094         zmii_exit();
3095         mal_exit();
3096         emac_fini_debug();
3097
3098         /* Destroy EMAC boot list */
3099         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3100                 of_node_put(emac_boot_list[i]);
3101 }
3102
/* Register the module entry/exit points with the kernel */
module_init(emac_init);
module_exit(emac_exit);