]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/net/ethernet/broadcom/b44.c
Merge tag 'juno-fix-5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep...
[linux.git] / drivers / net / ethernet / broadcom / b44.c
1 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6  * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
7  * Copyright (C) 2006 Broadcom Corporation.
8  * Copyright (C) 2007 Michael Buesch <m@bues.ch>
9  * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
10  *
11  * Distribute under GPL.
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/netdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/mii.h>
23 #include <linux/if_ether.h>
24 #include <linux/if_vlan.h>
25 #include <linux/etherdevice.h>
26 #include <linux/pci.h>
27 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/ssb/ssb.h>
32 #include <linux/slab.h>
33 #include <linux/phy.h>
34
35 #include <linux/uaccess.h>
36 #include <asm/io.h>
37 #include <asm/irq.h>
38
39
40 #include "b44.h"
41
42 #define DRV_MODULE_NAME         "b44"
43 #define DRV_MODULE_VERSION      "2.0"
44 #define DRV_DESCRIPTION         "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
45
46 #define B44_DEF_MSG_ENABLE        \
47         (NETIF_MSG_DRV          | \
48          NETIF_MSG_PROBE        | \
49          NETIF_MSG_LINK         | \
50          NETIF_MSG_TIMER        | \
51          NETIF_MSG_IFDOWN       | \
52          NETIF_MSG_IFUP         | \
53          NETIF_MSG_RX_ERR       | \
54          NETIF_MSG_TX_ERR)
55
56 /* length of time before we decide the hardware is borked,
57  * and dev->tx_timeout() should be called to fix the problem
58  */
59 #define B44_TX_TIMEOUT                  (5 * HZ)
60
61 /* hardware minimum and maximum for a single frame's data payload */
62 #define B44_MIN_MTU                     ETH_ZLEN
63 #define B44_MAX_MTU                     ETH_DATA_LEN
64
65 #define B44_RX_RING_SIZE                512
66 #define B44_DEF_RX_RING_PENDING         200
67 #define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
68                                  B44_RX_RING_SIZE)
69 #define B44_TX_RING_SIZE                512
70 #define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
71 #define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
72                                  B44_TX_RING_SIZE)
73
74 #define TX_RING_GAP(BP) \
75         (B44_TX_RING_SIZE - (BP)->tx_pending)
76 #define TX_BUFFS_AVAIL(BP)                                              \
77         (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
78           (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
79           (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
80 #define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
81
82 #define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
83 #define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)
84
85 /* minimum number of free TX descriptors required to wake up TX process */
86 #define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
87
88 /* b44 internal pattern match filter info */
89 #define B44_PATTERN_BASE        0x400
90 #define B44_PATTERN_SIZE        0x80
91 #define B44_PMASK_BASE          0x600
92 #define B44_PMASK_SIZE          0x10
93 #define B44_MAX_PATTERNS        16
94 #define B44_ETHIPV6UDP_HLEN     62
95 #define B44_ETHIPV4UDP_HLEN     42
96
97 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
98 MODULE_DESCRIPTION(DRV_DESCRIPTION);
99 MODULE_LICENSE("GPL");
100 MODULE_VERSION(DRV_MODULE_VERSION);
101
102 static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
103 module_param(b44_debug, int, 0);
104 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
105
106
107 #ifdef CONFIG_B44_PCI
108 static const struct pci_device_id b44_pci_tbl[] = {
109         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
110         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
111         { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
112         { 0 } /* terminate list with empty entry */
113 };
114 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
115
116 static struct pci_driver b44_pci_driver = {
117         .name           = DRV_MODULE_NAME,
118         .id_table       = b44_pci_tbl,
119 };
120 #endif /* CONFIG_B44_PCI */
121
122 static const struct ssb_device_id b44_ssb_tbl[] = {
123         SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
124         {},
125 };
126 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
127
128 static void b44_halt(struct b44 *);
129 static void b44_init_rings(struct b44 *);
130
131 #define B44_FULL_RESET          1
132 #define B44_FULL_RESET_SKIP_PHY 2
133 #define B44_PARTIAL_RESET       3
134 #define B44_CHIP_RESET_FULL     4
135 #define B44_CHIP_RESET_PARTIAL  5
136
137 static void b44_init_hw(struct b44 *, int);
138
139 static int dma_desc_sync_size;
140 static int instance;
141
142 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
143 #define _B44(x...)      # x,
144 B44_STAT_REG_DECLARE
145 #undef _B44
146 };
147
148 static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
149                                                 dma_addr_t dma_base,
150                                                 unsigned long offset,
151                                                 enum dma_data_direction dir)
152 {
153         dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
154                                    dma_desc_sync_size, dir);
155 }
156
157 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
158                                              dma_addr_t dma_base,
159                                              unsigned long offset,
160                                              enum dma_data_direction dir)
161 {
162         dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
163                                 dma_desc_sync_size, dir);
164 }
165
/* Read a 32-bit chip register through the SSB bus helper. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}
170
/* Write a 32-bit value to a chip register through the SSB bus helper. */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}
176
177 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
178                         u32 bit, unsigned long timeout, const int clear)
179 {
180         unsigned long i;
181
182         for (i = 0; i < timeout; i++) {
183                 u32 val = br32(bp, reg);
184
185                 if (clear && !(val & bit))
186                         break;
187                 if (!clear && (val & bit))
188                         break;
189                 udelay(10);
190         }
191         if (i == timeout) {
192                 if (net_ratelimit())
193                         netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
194                                    bit, reg, clear ? "clear" : "set");
195
196                 return -ENODEV;
197         }
198         return 0;
199 }
200
201 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
202 {
203         u32 val;
204
205         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
206                             (index << CAM_CTRL_INDEX_SHIFT)));
207
208         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
209
210         val = br32(bp, B44_CAM_DATA_LO);
211
212         data[2] = (val >> 24) & 0xFF;
213         data[3] = (val >> 16) & 0xFF;
214         data[4] = (val >> 8) & 0xFF;
215         data[5] = (val >> 0) & 0xFF;
216
217         val = br32(bp, B44_CAM_DATA_HI);
218
219         data[0] = (val >> 8) & 0xFF;
220         data[1] = (val >> 0) & 0xFF;
221 }
222
223 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
224 {
225         u32 val;
226
227         val  = ((u32) data[2]) << 24;
228         val |= ((u32) data[3]) << 16;
229         val |= ((u32) data[4]) <<  8;
230         val |= ((u32) data[5]) <<  0;
231         bw32(bp, B44_CAM_DATA_LO, val);
232         val = (CAM_DATA_HI_VALID |
233                (((u32) data[0]) << 8) |
234                (((u32) data[1]) << 0));
235         bw32(bp, B44_CAM_DATA_HI, val);
236         bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
237                             (index << CAM_CTRL_INDEX_SHIFT)));
238         b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
239 }
240
/* Mask all chip interrupts (no posted-write flush; see b44_disable_ints). */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}
245
246 static void b44_disable_ints(struct b44 *bp)
247 {
248         __b44_disable_ints(bp);
249
250         /* Flush posted writes. */
251         br32(bp, B44_IMASK);
252 }
253
/* Re-enable the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
258
259 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
260 {
261         int err;
262
263         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
264         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
265                              (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
266                              (phy_addr << MDIO_DATA_PMD_SHIFT) |
267                              (reg << MDIO_DATA_RA_SHIFT) |
268                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
269         err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
270         *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
271
272         return err;
273 }
274
275 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
276 {
277         bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
278         bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
279                              (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
280                              (phy_addr << MDIO_DATA_PMD_SHIFT) |
281                              (reg << MDIO_DATA_RA_SHIFT) |
282                              (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
283                              (val & MDIO_DATA_DATA)));
284         return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
285 }
286
287 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
288 {
289         if (bp->flags & B44_FLAG_EXTERNAL_PHY)
290                 return 0;
291
292         return __b44_readphy(bp, bp->phy_addr, reg, val);
293 }
294
295 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
296 {
297         if (bp->flags & B44_FLAG_EXTERNAL_PHY)
298                 return 0;
299
300         return __b44_writephy(bp, bp->phy_addr, reg, val);
301 }
302
303 /* miilib interface */
304 static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
305 {
306         u32 val;
307         struct b44 *bp = netdev_priv(dev);
308         int rc = __b44_readphy(bp, phy_id, location, &val);
309         if (rc)
310                 return 0xffffffff;
311         return val;
312 }
313
/* miilib write callback; MDIO errors are ignored by this interface. */
static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
			       int val)
{
	struct b44 *bp = netdev_priv(dev);

	__b44_writephy(bp, phy_id, location, val);
}
320
321 static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
322 {
323         u32 val;
324         struct b44 *bp = bus->priv;
325         int rc = __b44_readphy(bp, phy_id, location, &val);
326         if (rc)
327                 return 0xffffffff;
328         return val;
329 }
330
331 static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
332                                  u16 val)
333 {
334         struct b44 *bp = bus->priv;
335         return __b44_writephy(bp, phy_id, location, val);
336 }
337
338 static int b44_phy_reset(struct b44 *bp)
339 {
340         u32 val;
341         int err;
342
343         if (bp->flags & B44_FLAG_EXTERNAL_PHY)
344                 return 0;
345         err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
346         if (err)
347                 return err;
348         udelay(100);
349         err = b44_readphy(bp, MII_BMCR, &val);
350         if (!err) {
351                 if (val & BMCR_RESET) {
352                         netdev_err(bp->dev, "PHY Reset would not complete\n");
353                         err = -ENODEV;
354                 }
355         }
356
357         return err;
358 }
359
360 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
361 {
362         u32 val;
363
364         bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
365         bp->flags |= pause_flags;
366
367         val = br32(bp, B44_RXCONFIG);
368         if (pause_flags & B44_FLAG_RX_PAUSE)
369                 val |= RXCONFIG_FLOW;
370         else
371                 val &= ~RXCONFIG_FLOW;
372         bw32(bp, B44_RXCONFIG, val);
373
374         val = br32(bp, B44_MAC_FLOW);
375         if (pause_flags & B44_FLAG_TX_PAUSE)
376                 val |= (MAC_FLOW_PAUSE_ENAB |
377                         (0xc0 & MAC_FLOW_RX_HI_WATER));
378         else
379                 val &= ~MAC_FLOW_PAUSE_ENAB;
380         bw32(bp, B44_MAC_FLOW, val);
381 }
382
383 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
384 {
385         u32 pause_enab = 0;
386
387         /* The driver supports only rx pause by default because
388            the b44 mac tx pause mechanism generates excessive
389            pause frames.
390            Use ethtool to turn on b44 tx pause if necessary.
391          */
392         if ((local & ADVERTISE_PAUSE_CAP) &&
393             (local & ADVERTISE_PAUSE_ASYM)){
394                 if ((remote & LPA_PAUSE_ASYM) &&
395                     !(remote & LPA_PAUSE_CAP))
396                         pause_enab |= B44_FLAG_RX_PAUSE;
397         }
398
399         __b44_set_flow_ctrl(bp, pause_enab);
400 }
401
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
/* Clear a stuck PHY "isolate" bit on Linksys WAP54G v1.0 boards,
 * identified by nvram boardnum == 2.  No-op on other boards or when
 * the nvram variable is absent.
 */
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		/* Nothing to do if the PHY is not isolated. */
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
/* Workaround only applies to BCM47xx boards; stub it out elsewhere. */
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif
437
438 static int b44_setup_phy(struct b44 *bp)
439 {
440         u32 val;
441         int err;
442
443         b44_wap54g10_workaround(bp);
444
445         if (bp->flags & B44_FLAG_EXTERNAL_PHY)
446                 return 0;
447         if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
448                 goto out;
449         if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
450                                 val & MII_ALEDCTRL_ALLMSK)) != 0)
451                 goto out;
452         if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
453                 goto out;
454         if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
455                                 val | MII_TLEDCTRL_ENABLE)) != 0)
456                 goto out;
457
458         if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
459                 u32 adv = ADVERTISE_CSMA;
460
461                 if (bp->flags & B44_FLAG_ADV_10HALF)
462                         adv |= ADVERTISE_10HALF;
463                 if (bp->flags & B44_FLAG_ADV_10FULL)
464                         adv |= ADVERTISE_10FULL;
465                 if (bp->flags & B44_FLAG_ADV_100HALF)
466                         adv |= ADVERTISE_100HALF;
467                 if (bp->flags & B44_FLAG_ADV_100FULL)
468                         adv |= ADVERTISE_100FULL;
469
470                 if (bp->flags & B44_FLAG_PAUSE_AUTO)
471                         adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
472
473                 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
474                         goto out;
475                 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
476                                                        BMCR_ANRESTART))) != 0)
477                         goto out;
478         } else {
479                 u32 bmcr;
480
481                 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
482                         goto out;
483                 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
484                 if (bp->flags & B44_FLAG_100_BASE_T)
485                         bmcr |= BMCR_SPEED100;
486                 if (bp->flags & B44_FLAG_FULL_DUPLEX)
487                         bmcr |= BMCR_FULLDPLX;
488                 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
489                         goto out;
490
491                 /* Since we will not be negotiating there is no safe way
492                  * to determine if the link partner supports flow control
493                  * or not.  So just disable it completely in this case.
494                  */
495                 b44_set_flow_ctrl(bp, 0, 0);
496         }
497
498 out:
499         return err;
500 }
501
/* Accumulate the chip's 32-bit MIB counters into the 64-bit software
 * totals in bp->hw_stats, under the u64_stats sequence lock.
 *
 * NOTE(review): this walks from hw_stats.tx_good_octets onward as a
 * flat array of u64, so it relies on the counters in the hw_stats
 * struct being declared in exactly the same order as the
 * B44_TX_GOOD_O..B44_TX_PAUSE and B44_RX_GOOD_O..B44_RX_NPAUSE register
 * ranges — confirm against b44.h before reordering either side.
 */
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	/* MIB registers are 32 bits wide, laid out 4 bytes apart. */
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}
520
521 static void b44_link_report(struct b44 *bp)
522 {
523         if (!netif_carrier_ok(bp->dev)) {
524                 netdev_info(bp->dev, "Link is down\n");
525         } else {
526                 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
527                             (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
528                             (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
529
530                 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
531                             (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
532                             (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
533         }
534 }
535
/* Poll the PHY and propagate speed/duplex/carrier changes into the MAC
 * and netdev state.  Called from the driver timer with bp->lock held.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		/* External PHYs are managed by phylib; just mirror the
		 * duplex flag into the MAC and assert carrier once.
		 */
		bp->flags |= B44_FLAG_100_BASE_T;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	/* bmsr == 0xffff means the PHY did not answer the MDIO read. */
	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			/* Mirror the resolved duplex into the MAC. */
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			/* Resolve pause from the negotiated advertisement
			 * unless the link was forced.
			 */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}
598
599 static void b44_timer(struct timer_list *t)
600 {
601         struct b44 *bp = from_timer(bp, t, timer);
602
603         spin_lock_irq(&bp->lock);
604
605         b44_check_phy(bp);
606
607         b44_stats_update(bp);
608
609         spin_unlock_irq(&bp->lock);
610
611         mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
612 }
613
/* Reclaim completed TX descriptors up to the hardware's current
 * descriptor pointer: unmap and free each skb, account completed
 * packets/bytes with BQL, and wake the queue once enough ring space is
 * free.  Runs in interrupt/NAPI context with bp->lock held.
 */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The DMA status register reports a byte offset into the ring;
	 * convert it to a descriptor index.
	 */
	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_consume_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
649
/* Works like this.  This chip writes a 'struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
/* Allocate and map a fresh RX skb into ring slot @dest_idx_unmasked
 * (masked to the ring size); @src_idx >= 0 names a slot whose skb
 * pointer should be cleared once the new buffer is in place.
 *
 * Returns RX_PKT_BUF_SZ on success, -ENOMEM on allocation or mapping
 * failure.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... retry once from the GFP_DMA zone, which is
		 * guaranteed to sit below the 1GB boundary.
		 */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		/* Buffers must now be copied on receive; see b44_rx(). */
		bp->force_copybreak = 1;
	}

	/* The chip writes the rx_header at the very start of the buffer. */
	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					    dest_idx * sizeof(*dp),
					    DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
729
/* Move the skb and DMA mapping from RX ring slot @src_idx to
 * @dest_idx_unmasked (masked to the ring size) without reallocating,
 * reset its rx_header, fix up the EOT bit for the destination slot, and
 * hand the buffer back to the device.  Used to recycle buffers for
 * dropped or copied packets.
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	/* Clear the header so the chip's next write is distinguishable. */
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					 src_idx * sizeof(*src_desc),
					 DMA_BIDIRECTIONAL);

	/* Carry the control word over, adjusting only the end-of-table
	 * bit for the destination position.
	 */
	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	/* Give the data buffer back to the device. */
	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}
775
/* NAPI receive: process up to @budget packets between the software
 * consumer index and the hardware's producer index.  Large packets are
 * handed up in their original buffer (a fresh one is allocated for the
 * ring); small packets are copied into a new skb and the original
 * buffer is recycled.  Returns the number of packets received.
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	/* Hardware reports a byte offset; convert to a ring index. */
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		/* The chip prepends a rx_header with length and flags. */
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			/* The chip may not have finished writing the
			 * header yet; poll briefly for the length to
			 * appear before giving up on the packet.
			 */
			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = napi_alloc_skb(&bp->napi, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	/* Tell the chip how far we have consumed. */
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
862
/* NAPI poll callback.  The hard IRQ handler (b44_interrupt) latched the
 * pending cause bits into bp->istat and masked the chip's interrupts;
 * all the real work (TX reap, RX, error recovery) happens here.
 */
static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        int work_done;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
                bp->istat &= ~ISTAT_RFO;
                b44_disable_ints(bp);
                ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        /* RX processing runs without bp->lock held. */
        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                /* Fatal chip error: halt, rebuild the rings, re-init. */
                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                /* Claim no work so NAPI completes below and interrupts
                 * get re-enabled. */
                work_done = 0;
        }

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                b44_enable_ints(bp);
        }

        return work_done;
}
908
/* Hard IRQ handler (the line may be shared — see IRQF_SHARED in
 * b44_open).  Latches the enabled cause bits into bp->istat, masks the
 * chip's interrupts, schedules NAPI, and acks the interrupt.
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        netdev_info(dev, "late interrupt\n");
                        goto irq_ack;
                }

                if (napi_schedule_prep(&bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __napi_schedule(&bp->napi);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);    /* flush the posted ack */
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}
950
/* ndo_tx_timeout hook: the TX path appears wedged, so halt the chip,
 * rebuild the rings, run a full re-init (including the PHY), and
 * restart the queue.
 */
static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct b44 *bp = netdev_priv(dev);

        netdev_err(dev, "transmit timed out, resetting\n");

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}
969
/* ndo_start_xmit: queue one skb for transmission.  Each skb is mapped
 * as a single linear buffer (one descriptor, SOF|EOF set together).
 * Data that maps above the chip's 30-bit (1GB) DMA limit is copied
 * into a GFP_DMA bounce skb first.  Returns NETDEV_TX_OK on success,
 * NETDEV_TX_BUSY when the ring is full or mapping/allocation fails.
 */
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;
        unsigned long flags;

        len = skb->len;
        spin_lock_irqsave(&bp->lock, flags);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                goto err_out;
        }

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                             DMA_TO_DEVICE);

                bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
                                         len, DMA_TO_DEVICE);
                if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                     len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                /* Copy the payload and transmit the bounce skb instead. */
                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_consume_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;  /* last slot: mark end of table */

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

        /* "Ring hack" descriptors are streaming-mapped and must be
         * flushed to the device explicitly. */
        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                            entry * sizeof(bp->tx_ring[0]),
                                            DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        /* Descriptor writes must be visible before the doorbell below. */
        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        /* Chip-bug workarounds: write the TX pointer twice, and/or read
         * it back to flush reordered writes. */
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        netdev_sent_queue(dev, skb->len);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

out_unlock:
        spin_unlock_irqrestore(&bp->lock, flags);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}
1059
1060 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1061 {
1062         struct b44 *bp = netdev_priv(dev);
1063
1064         if (!netif_running(dev)) {
1065                 /* We'll just catch it later when the
1066                  * device is up'd.
1067                  */
1068                 dev->mtu = new_mtu;
1069                 return 0;
1070         }
1071
1072         spin_lock_irq(&bp->lock);
1073         b44_halt(bp);
1074         dev->mtu = new_mtu;
1075         b44_init_rings(bp);
1076         b44_init_hw(bp, B44_FULL_RESET);
1077         spin_unlock_irq(&bp->lock);
1078
1079         b44_enable_ints(bp);
1080
1081         return 0;
1082 }
1083
1084 /* Free up pending packets in all rx/tx rings.
1085  *
1086  * The chip has been shut down and the driver detached from
1087  * the networking, so no interrupts or new tx packets will
1088  * end up in the driver.  bp->lock is not held and we are not
1089  * in an interrupt context and thus may sleep.
1090  */
1091 static void b44_free_rings(struct b44 *bp)
1092 {
1093         struct ring_info *rp;
1094         int i;
1095
1096         for (i = 0; i < B44_RX_RING_SIZE; i++) {
1097                 rp = &bp->rx_buffers[i];
1098
1099                 if (rp->skb == NULL)
1100                         continue;
1101                 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1102                                  DMA_FROM_DEVICE);
1103                 dev_kfree_skb_any(rp->skb);
1104                 rp->skb = NULL;
1105         }
1106
1107         /* XXX needs changes once NETIF_F_SG is set... */
1108         for (i = 0; i < B44_TX_RING_SIZE; i++) {
1109                 rp = &bp->tx_buffers[i];
1110
1111                 if (rp->skb == NULL)
1112                         continue;
1113                 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1114                                  DMA_TO_DEVICE);
1115                 dev_kfree_skb_any(rp->skb);
1116                 rp->skb = NULL;
1117         }
1118 }
1119
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        /* "Ring hack" descriptor tables are streaming-mapped, so the
         * memsets above must be flushed to the device explicitly. */
        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_TO_DEVICE);

        /* Pre-fill the RX ring; a failed allocation just leaves it
         * shorter than rx_pending. */
        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}
1148
1149 /*
1150  * Must not be invoked with interrupt sources disabled and
1151  * the hardware shutdown down.
1152  */
1153 static void b44_free_consistent(struct b44 *bp)
1154 {
1155         kfree(bp->rx_buffers);
1156         bp->rx_buffers = NULL;
1157         kfree(bp->tx_buffers);
1158         bp->tx_buffers = NULL;
1159         if (bp->rx_ring) {
1160                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1161                         dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1162                                          DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1163                         kfree(bp->rx_ring);
1164                 } else
1165                         dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1166                                           bp->rx_ring, bp->rx_ring_dma);
1167                 bp->rx_ring = NULL;
1168                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1169         }
1170         if (bp->tx_ring) {
1171                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1172                         dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1173                                          DMA_TABLE_BYTES, DMA_TO_DEVICE);
1174                         kfree(bp->tx_ring);
1175                 } else
1176                         dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1177                                           bp->tx_ring, bp->tx_ring_dma);
1178                 bp->tx_ring = NULL;
1179                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1180         }
1181 }
1182
1183 /*
1184  * Must not be invoked with interrupt sources disabled and
1185  * the hardware shutdown down.  Can sleep.
1186  */
1187 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1188 {
1189         int size;
1190
1191         size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1192         bp->rx_buffers = kzalloc(size, gfp);
1193         if (!bp->rx_buffers)
1194                 goto out_err;
1195
1196         size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1197         bp->tx_buffers = kzalloc(size, gfp);
1198         if (!bp->tx_buffers)
1199                 goto out_err;
1200
1201         size = DMA_TABLE_BYTES;
1202         bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1203                                          &bp->rx_ring_dma, gfp);
1204         if (!bp->rx_ring) {
1205                 /* Allocation may have failed due to pci_alloc_consistent
1206                    insisting on use of GFP_DMA, which is more restrictive
1207                    than necessary...  */
1208                 struct dma_desc *rx_ring;
1209                 dma_addr_t rx_ring_dma;
1210
1211                 rx_ring = kzalloc(size, gfp);
1212                 if (!rx_ring)
1213                         goto out_err;
1214
1215                 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1216                                              DMA_TABLE_BYTES,
1217                                              DMA_BIDIRECTIONAL);
1218
1219                 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1220                         rx_ring_dma + size > DMA_BIT_MASK(30)) {
1221                         kfree(rx_ring);
1222                         goto out_err;
1223                 }
1224
1225                 bp->rx_ring = rx_ring;
1226                 bp->rx_ring_dma = rx_ring_dma;
1227                 bp->flags |= B44_FLAG_RX_RING_HACK;
1228         }
1229
1230         bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1231                                          &bp->tx_ring_dma, gfp);
1232         if (!bp->tx_ring) {
1233                 /* Allocation may have failed due to ssb_dma_alloc_consistent
1234                    insisting on use of GFP_DMA, which is more restrictive
1235                    than necessary...  */
1236                 struct dma_desc *tx_ring;
1237                 dma_addr_t tx_ring_dma;
1238
1239                 tx_ring = kzalloc(size, gfp);
1240                 if (!tx_ring)
1241                         goto out_err;
1242
1243                 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1244                                              DMA_TABLE_BYTES,
1245                                              DMA_TO_DEVICE);
1246
1247                 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1248                         tx_ring_dma + size > DMA_BIT_MASK(30)) {
1249                         kfree(tx_ring);
1250                         goto out_err;
1251                 }
1252
1253                 bp->tx_ring = tx_ring;
1254                 bp->tx_ring_dma = tx_ring_dma;
1255                 bp->flags |= B44_FLAG_TX_RING_HACK;
1256         }
1257
1258         return 0;
1259
1260 out_err:
1261         b44_free_consistent(bp);
1262         return -ENOMEM;
1263 }
1264
1265 /* bp->lock is held. */
1266 static void b44_clear_stats(struct b44 *bp)
1267 {
1268         unsigned long reg;
1269
1270         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1271         for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1272                 br32(bp, reg);
1273         for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1274                 br32(bp, reg);
1275 }
1276
/* bp->lock is held.
 *
 * Reset the ethernet core.  If the core was already running, both DMA
 * engines are quiesced first and the ring indices reset.  For a
 * partial reset the function stops after clearing the MIB counters;
 * a full reset additionally programs the MDIO interface and detects
 * whether an internal or external PHY is attached.
 */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
        struct ssb_device *sdev = bp->sdev;
        bool was_enabled;

        was_enabled = ssb_device_is_enabled(bp->sdev);

        ssb_device_enable(bp->sdev, 0);
        ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

        if (was_enabled) {
                /* Disable the MAC, then shut down both DMA engines. */
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        /* Wait for the RX engine to reach idle before
                         * disabling it. */
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        }

        b44_clear_stats(bp);

        /*
         * Don't enable PHY if we are doing a partial reset
         * we are probably going to power down
         */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        /* Program the MDIO frequency divider for this bus type. */
        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_SSB:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
                                        B44_MDC_RATIO)
                     & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCI:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (0x0d & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCMCIA:
        case SSB_BUSTYPE_SDIO:
                WARN_ON(1); /* A device with this bus does not exist. */
                break;
        }

        br32(bp, B44_MDIO_CTRL);        /* flush the posted write */

        /* DEVCTRL_IPP clear means an external PHY is attached. */
        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags |= B44_FLAG_EXTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        /* Clear the EPR bit and let the (internal) PHY
                         * settle. */
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
        }
}
1345
/* bp->lock is held.
 *
 * Stop the chip: mask interrupts, reset and power down the PHY, then
 * reset the core.  With an external PHY a full core reset is used;
 * with the internal PHY only a partial reset, leaving the just
 * powered-down PHY alone.
 */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        /* reset PHY */
        b44_phy_reset(bp);
        /* power down PHY */
        netdev_info(bp->dev, "powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
        /* now reset the chip, but without enabling the MAC&PHY
         * part of it. This has to be done _after_ we shut down the PHY */
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        else
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
1362
1363 /* bp->lock is held. */
1364 static void __b44_set_mac_addr(struct b44 *bp)
1365 {
1366         bw32(bp, B44_CAM_CTRL, 0);
1367         if (!(bp->dev->flags & IFF_PROMISC)) {
1368                 u32 val;
1369
1370                 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1371                 val = br32(bp, B44_CAM_CTRL);
1372                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1373         }
1374 }
1375
1376 static int b44_set_mac_addr(struct net_device *dev, void *p)
1377 {
1378         struct b44 *bp = netdev_priv(dev);
1379         struct sockaddr *addr = p;
1380         u32 val;
1381
1382         if (netif_running(dev))
1383                 return -EBUSY;
1384
1385         if (!is_valid_ether_addr(addr->sa_data))
1386                 return -EINVAL;
1387
1388         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1389
1390         spin_lock_irq(&bp->lock);
1391
1392         val = br32(bp, B44_RXCONFIG);
1393         if (!(val & RXCONFIG_CAM_ABSENT))
1394                 __b44_set_mac_addr(bp);
1395
1396         spin_unlock_irq(&bp->lock);
1397
1398         return 0;
1399 }
1400
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                /* Only a full reset re-initializes the PHY; the
                 * SKIP_PHY and partial variants leave it alone. */
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                /* Partial reset is used by the WOL path (b44_close):
                 * only the RX engine is brought up. */
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                /* Hand the pre-filled RX descriptors to the chip. */
                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

        netdev_reset_queue(bp->dev);
}
1448
/* ndo_open: allocate rings/descriptor tables, bring up the hardware,
 * hook the (shared) IRQ, arm the periodic timer and start the queue.
 * Returns 0 or a negative errno; on failure everything allocated here
 * is released again.
 */
static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp, GFP_KERNEL);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                /* Unwind: quiesce the chip and free everything
                 * allocated above. */
                napi_disable(&bp->napi);
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        /* Arm the 1-second periodic timer (b44_timer, defined
         * elsewhere in this file). */
        timer_setup(&bp->timer, b44_timer, 0);
        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);

        b44_enable_ints(bp);

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                phy_start(dev->phydev);

        netif_start_queue(dev);
out:
        return err;
}
1487
1488 #ifdef CONFIG_NET_POLL_CONTROLLER
1489 /*
1490  * Polling receive - used by netconsole and other diagnostic tools
1491  * to allow network i/o with interrupts disabled.
1492  */
static void b44_poll_controller(struct net_device *dev)
{
        /* Re-use the regular interrupt handler with the IRQ line
         * disabled, so it cannot re-enter itself. */
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
1499 #endif
1500
1501 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1502 {
1503         u32 i;
1504         u32 *pattern = (u32 *) pp;
1505
1506         for (i = 0; i < bytes; i += sizeof(u32)) {
1507                 bw32(bp, B44_FILT_ADDR, table_offset + i);
1508                 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1509         }
1510 }
1511
1512 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1513 {
1514         int magicsync = 6;
1515         int k, j, len = offset;
1516         int ethaddr_bytes = ETH_ALEN;
1517
1518         memset(ppattern + offset, 0xff, magicsync);
1519         for (j = 0; j < magicsync; j++) {
1520                 pmask[len >> 3] |= BIT(len & 7);
1521                 len++;
1522         }
1523
1524         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1525                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1526                         ethaddr_bytes = ETH_ALEN;
1527                 else
1528                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1529                 if (ethaddr_bytes <=0)
1530                         break;
1531                 for (k = 0; k< ethaddr_bytes; k++) {
1532                         ppattern[offset + magicsync +
1533                                 (j * ETH_ALEN) + k] = macaddr[k];
1534                         pmask[len >> 3] |= BIT(len & 7);
1535                         len++;
1536                 }
1537         }
1538         return len - 1;
1539 }
1540
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.  Three patterns are programmed, matching
 * the magic sequence at the payload offset of an IPv4/UDP frame, a
 * raw Ethernet II frame, and an IPv6/UDP frame.  The pattern buffer
 * is reused for all three; failure to allocate it just leaves the
 * patterns unarmed (best effort).
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{

        u32 val;
        int plen0, plen1, plen2;
        u8 *pwol_pattern;
        u8 pwol_mask[B44_PMASK_SIZE];

        pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
        if (!pwol_pattern)
                return;

        /* Ipv4 magic packet pattern - pattern 0.*/
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  B44_ETHIPV4UDP_HLEN);

        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

        /* Raw ethernet II magic packet pattern - pattern 1 */
        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  ETH_HLEN);

        /* Pattern/mask slot 1: base + one pattern/mask stride. */
        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                       B44_PATTERN_BASE + B44_PATTERN_SIZE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                       B44_PMASK_BASE + B44_PMASK_SIZE);

        /* Ipv6 magic packet pattern - pattern 2 */
        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  B44_ETHIPV6UDP_HLEN);

        /* Pattern/mask slot 2: base + two strides. */
        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

        kfree(pwol_pattern);

        /* set these pattern's lengths: one less than each real length */
        val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
        bw32(bp, B44_WKUP_LEN, val);

        /* enable wakeup pattern matching */
        val = br32(bp, B44_DEVCTRL);
        bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);

}
1597
#ifdef CONFIG_B44_PCI
/* PCI-hosted cores need extra WOL plumbing; no-op on native SSB. */
static void b44_setup_wol_pci(struct b44 *bp)
{
        u16 val;

        if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
                /* NOTE(review): sets SSB_TMSLOW_PE and the PE bit in the
                 * SSB_PMCSR config word — presumably power-management /
                 * PME enable; confirm against the SSB documentation. */
                bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
                pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
                pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
        }
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */
1612
/* Arm wake-on-LAN before the chip is powered down.  B0-and-later
 * cores use the dedicated magic-packet mode (DEVCTRL_MPM) with the
 * station address in B44_ADDR_HI/LO; older cores emulate it through
 * the pattern-match filter instead.
 */
static void b44_setup_wol(struct b44 *bp)
{
        u32 val;

        /* Accept all multicast while in WOL mode. */
        bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

        if (bp->flags & B44_FLAG_B0_ANDLATER) {

                bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

                /* Station address bytes 2..5 go into ADDR_LO ... */
                val = bp->dev->dev_addr[2] << 24 |
                        bp->dev->dev_addr[3] << 16 |
                        bp->dev->dev_addr[4] << 8 |
                        bp->dev->dev_addr[5];
                bw32(bp, B44_ADDR_LO, val);

                /* ... and bytes 0..1 into ADDR_HI. */
                val = bp->dev->dev_addr[0] << 8 |
                        bp->dev->dev_addr[1];
                bw32(bp, B44_ADDR_HI, val);

                val = br32(bp, B44_DEVCTRL);
                bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

        } else {
                b44_setup_pseudo_magicp(bp);
        }
        b44_setup_wol_pci(bp);
}
1641
/* ndo_stop: tear down in roughly the reverse order of b44_open(). */
static int b44_close(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        netif_stop_queue(dev);

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                phy_stop(dev->phydev);

        napi_disable(&bp->napi);

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_free_rings(bp);
        netif_carrier_off(dev);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);

        /* If wake-on-LAN is requested, partially re-init the chip so
         * it can still match wake patterns while the netdev is down. */
        if (bp->flags & B44_FLAG_WOL_ENABLE) {
                b44_init_hw(bp, B44_PARTIAL_RESET);
                b44_setup_wol(bp);
        }

        b44_free_consistent(bp);

        return 0;
}
1674
/* ndo_get_stats64: translate the driver-maintained hardware MIB
 * counters into rtnl_link_stats64.  The u64_stats fetch/retry loop
 * guarantees a consistent snapshot against concurrent updates of
 * hwstat->syncp.
 */
static void b44_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *nstat)
{
        struct b44 *bp = netdev_priv(dev);
        struct b44_hw_stats *hwstat = &bp->hw_stats;
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&hwstat->syncp);

                /* Convert HW stats into rtnl_link_stats64 stats. */
                nstat->rx_packets = hwstat->rx_pkts;
                nstat->tx_packets = hwstat->tx_pkts;
                nstat->rx_bytes   = hwstat->rx_octets;
                nstat->tx_bytes   = hwstat->tx_octets;
                nstat->tx_errors  = (hwstat->tx_jabber_pkts +
                                     hwstat->tx_oversize_pkts +
                                     hwstat->tx_underruns +
                                     hwstat->tx_excessive_cols +
                                     hwstat->tx_late_cols);
                nstat->multicast  = hwstat->rx_multicast_pkts;
                nstat->collisions = hwstat->tx_total_cols;

                nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                                           hwstat->rx_undersize);
                nstat->rx_over_errors   = hwstat->rx_missed_pkts;
                nstat->rx_frame_errors  = hwstat->rx_align_errs;
                nstat->rx_crc_errors    = hwstat->rx_crc_errs;
                nstat->rx_errors        = (hwstat->rx_jabber_pkts +
                                           hwstat->rx_oversize_pkts +
                                           hwstat->rx_missed_pkts +
                                           hwstat->rx_crc_align_errs +
                                           hwstat->rx_undersize +
                                           hwstat->rx_crc_errs +
                                           hwstat->rx_align_errs +
                                           hwstat->rx_symbol_errs);

                nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
                /* Carrier lost counter seems to be broken for some devices */
                nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
        } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));

}
1720
1721 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1722 {
1723         struct netdev_hw_addr *ha;
1724         int i, num_ents;
1725
1726         num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1727         i = 0;
1728         netdev_for_each_mc_addr(ha, dev) {
1729                 if (i == num_ents)
1730                         break;
1731                 __b44_cam_write(bp, ha->addr, i++ + 1);
1732         }
1733         return i+1;
1734 }
1735
1736 static void __b44_set_rx_mode(struct net_device *dev)
1737 {
1738         struct b44 *bp = netdev_priv(dev);
1739         u32 val;
1740
1741         val = br32(bp, B44_RXCONFIG);
1742         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1743         if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1744                 val |= RXCONFIG_PROMISC;
1745                 bw32(bp, B44_RXCONFIG, val);
1746         } else {
1747                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1748                 int i = 1;
1749
1750                 __b44_set_mac_addr(bp);
1751
1752                 if ((dev->flags & IFF_ALLMULTI) ||
1753                     (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1754                         val |= RXCONFIG_ALLMULTI;
1755                 else
1756                         i = __b44_load_mcast(bp, dev);
1757
1758                 for (; i < 64; i++)
1759                         __b44_cam_write(bp, zero, i);
1760
1761                 bw32(bp, B44_RXCONFIG, val);
1762                 val = br32(bp, B44_CAM_CTRL);
1763                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1764         }
1765 }
1766
/* ndo_set_rx_mode hook: apply the RX filter under the driver lock. */
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
1775
/* ethtool: report the current netif_msg debug-message bitmask. */
static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}
1781
/* ethtool: set the netif_msg debug-message bitmask. */
static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}
1787
/* ethtool get_drvinfo: report driver name/version and a bus identifier
 * derived from the underlying SSB bus type.
 */
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}
1808
/* ethtool nway_reset: restart autonegotiation on the internal PHY.
 * Returns 0 on success, -EINVAL if autoneg is not currently enabled.
 */
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	/* BMCR is read twice back to back; presumably the first read
	 * flushes latched status — NOTE(review): confirm against the
	 * PHY datasheet before simplifying.
	 */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
1828
/* ethtool get_link_ksettings: delegate to phylib for an external PHY,
 * otherwise synthesize the settings from the driver's B44_FLAG_* state.
 * The hardware only supports 10/100 Mb/s over MII.
 */
static int b44_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 supported, advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);
	supported |= (SUPPORTED_100baseT_Half |
		      SUPPORTED_100baseT_Full |
		      SUPPORTED_10baseT_Half |
		      SUPPORTED_10baseT_Full |
		      SUPPORTED_MII);

	/* Map each B44_FLAG_ADV_* bit to its legacy advertising bit. */
	advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = 0;
	cmd->base.phy_address = bp->phy_addr;
	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->base.autoneg == AUTONEG_ENABLE)
		advertising |= ADVERTISED_Autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	/* Interface down: report "unknown" speed/duplex. */
	if (!netif_running(dev)){
		cmd->base.speed = 0;
		cmd->base.duplex = 0xff;
	}

	return 0;
}
1882
/* ethtool set_link_ksettings: for an external PHY hand the request to
 * phylib; for the internal PHY translate the request into B44_FLAG_*
 * bits and reprogram the PHY if the interface is running.
 * Returns 0 on success or -EINVAL for unsupported speed/duplex/modes.
 */
static int b44_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 speed;
	int ret;
	u32 advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		spin_lock_irq(&bp->lock);
		if (netif_running(dev))
			b44_setup_phy(bp);

		/* NOTE(review): phy_ethtool_ksettings_set() may sleep
		 * (phylib takes a mutex) while we hold a spinlock with
		 * IRQs off — looks questionable; confirm upstream.
		 */
		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);

		spin_unlock_irq(&bp->lock);

		return ret;
	}

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We do not support gigabit. */
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		if (advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((speed != SPEED_100 &&
		    speed != SPEED_10) ||
		   (cmd->base.duplex != DUPLEX_HALF &&
		    cmd->base.duplex != DUPLEX_FULL)) {
			return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Autoneg: clear forced-mode state, then either
		 * advertise everything (empty mask) or exactly the
		 * requested subset. */
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		/* Forced mode: record the requested speed/duplex. */
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->base.duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
1963
/* ethtool get_ringparam: report the RX ring limits; TX limits are not
 * reported (see the comment below).
 */
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}
1974
1975 static int b44_set_ringparam(struct net_device *dev,
1976                              struct ethtool_ringparam *ering)
1977 {
1978         struct b44 *bp = netdev_priv(dev);
1979
1980         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1981             (ering->rx_mini_pending != 0) ||
1982             (ering->rx_jumbo_pending != 0) ||
1983             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1984                 return -EINVAL;
1985
1986         spin_lock_irq(&bp->lock);
1987
1988         bp->rx_pending = ering->rx_pending;
1989         bp->tx_pending = ering->tx_pending;
1990
1991         b44_halt(bp);
1992         b44_init_rings(bp);
1993         b44_init_hw(bp, B44_FULL_RESET);
1994         netif_wake_queue(bp->dev);
1995         spin_unlock_irq(&bp->lock);
1996
1997         b44_enable_ints(bp);
1998
1999         return 0;
2000 }
2001
2002 static void b44_get_pauseparam(struct net_device *dev,
2003                                 struct ethtool_pauseparam *epause)
2004 {
2005         struct b44 *bp = netdev_priv(dev);
2006
2007         epause->autoneg =
2008                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
2009         epause->rx_pause =
2010                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
2011         epause->tx_pause =
2012                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
2013 }
2014
2015 static int b44_set_pauseparam(struct net_device *dev,
2016                                 struct ethtool_pauseparam *epause)
2017 {
2018         struct b44 *bp = netdev_priv(dev);
2019
2020         spin_lock_irq(&bp->lock);
2021         if (epause->autoneg)
2022                 bp->flags |= B44_FLAG_PAUSE_AUTO;
2023         else
2024                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2025         if (epause->rx_pause)
2026                 bp->flags |= B44_FLAG_RX_PAUSE;
2027         else
2028                 bp->flags &= ~B44_FLAG_RX_PAUSE;
2029         if (epause->tx_pause)
2030                 bp->flags |= B44_FLAG_TX_PAUSE;
2031         else
2032                 bp->flags &= ~B44_FLAG_TX_PAUSE;
2033         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2034                 b44_halt(bp);
2035                 b44_init_rings(bp);
2036                 b44_init_hw(bp, B44_FULL_RESET);
2037         } else {
2038                 __b44_set_flow_ctrl(bp, bp->flags);
2039         }
2040         spin_unlock_irq(&bp->lock);
2041
2042         b44_enable_ints(bp);
2043
2044         return 0;
2045 }
2046
/* ethtool get_strings: copy the flat statistics-name table.  The
 * caller guarantees 'data' is large enough for the whole string set.
 */
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}
2055
2056 static int b44_get_sset_count(struct net_device *dev, int sset)
2057 {
2058         switch (sset) {
2059         case ETH_SS_STATS:
2060                 return ARRAY_SIZE(b44_gstrings);
2061         default:
2062                 return -EOPNOTSUPP;
2063         }
2064 }
2065
/* ethtool get_ethtool_stats: refresh the MIB counters from hardware,
 * then copy them out under the u64_stats retry loop.
 *
 * NOTE(review): the copy walks raw u64s starting at
 * hwstat->tx_good_octets, so it assumes the counters in struct
 * b44_hw_stats are contiguous and in the same order as b44_gstrings —
 * verify this when adding or reordering counters.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	u32 i;

	spin_lock_irq(&bp->lock);
	b44_stats_update(bp);
	spin_unlock_irq(&bp->lock);

	do {
		data_src = &hwstat->tx_good_octets;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);

		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
}
2089
2090 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2091 {
2092         struct b44 *bp = netdev_priv(dev);
2093
2094         wol->supported = WAKE_MAGIC;
2095         if (bp->flags & B44_FLAG_WOL_ENABLE)
2096                 wol->wolopts = WAKE_MAGIC;
2097         else
2098                 wol->wolopts = 0;
2099         memset(&wol->sopass, 0, sizeof(wol->sopass));
2100 }
2101
2102 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2103 {
2104         struct b44 *bp = netdev_priv(dev);
2105
2106         spin_lock_irq(&bp->lock);
2107         if (wol->wolopts & WAKE_MAGIC)
2108                 bp->flags |= B44_FLAG_WOL_ENABLE;
2109         else
2110                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2111         spin_unlock_irq(&bp->lock);
2112
2113         device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2114         return 0;
2115 }
2116
/* ethtool entry points for the b44 driver. */
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_link_ksettings	= b44_get_link_ksettings,
	.set_link_ksettings	= b44_set_link_ksettings,
};
2135
2136 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2137 {
2138         struct b44 *bp = netdev_priv(dev);
2139         int err = -EINVAL;
2140
2141         if (!netif_running(dev))
2142                 goto out;
2143
2144         spin_lock_irq(&bp->lock);
2145         if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2146                 BUG_ON(!dev->phydev);
2147                 err = phy_mii_ioctl(dev->phydev, ifr, cmd);
2148         } else {
2149                 err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2150         }
2151         spin_unlock_irq(&bp->lock);
2152 out:
2153         return err;
2154 }
2155
2156 static int b44_get_invariants(struct b44 *bp)
2157 {
2158         struct ssb_device *sdev = bp->sdev;
2159         int err = 0;
2160         u8 *addr;
2161
2162         bp->dma_offset = ssb_dma_translation(sdev);
2163
2164         if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2165             instance > 1) {
2166                 addr = sdev->bus->sprom.et1mac;
2167                 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2168         } else {
2169                 addr = sdev->bus->sprom.et0mac;
2170                 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2171         }
2172         /* Some ROMs have buggy PHY addresses with the high
2173          * bits set (sign extension?). Truncate them to a
2174          * valid PHY address. */
2175         bp->phy_addr &= 0x1F;
2176
2177         memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2178
2179         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2180                 pr_err("Invalid MAC address found in EEPROM\n");
2181                 return -EINVAL;
2182         }
2183
2184         bp->imask = IMASK_DEF;
2185
2186         /* XXX - really required?
2187            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2188         */
2189
2190         if (bp->sdev->id.revision >= 7)
2191                 bp->flags |= B44_FLAG_B0_ANDLATER;
2192
2193         return err;
2194 }
2195
/* Network-device entry points for the b44 driver. */
static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};
2211
2212 static void b44_adjust_link(struct net_device *dev)
2213 {
2214         struct b44 *bp = netdev_priv(dev);
2215         struct phy_device *phydev = dev->phydev;
2216         bool status_changed = 0;
2217
2218         BUG_ON(!phydev);
2219
2220         if (bp->old_link != phydev->link) {
2221                 status_changed = 1;
2222                 bp->old_link = phydev->link;
2223         }
2224
2225         /* reflect duplex change */
2226         if (phydev->link) {
2227                 if ((phydev->duplex == DUPLEX_HALF) &&
2228                     (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2229                         status_changed = 1;
2230                         bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2231                 } else if ((phydev->duplex == DUPLEX_FULL) &&
2232                            !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2233                         status_changed = 1;
2234                         bp->flags |= B44_FLAG_FULL_DUPLEX;
2235                 }
2236         }
2237
2238         if (status_changed) {
2239                 u32 val = br32(bp, B44_TX_CTRL);
2240                 if (bp->flags & B44_FLAG_FULL_DUPLEX)
2241                         val |= TX_CTRL_DUPLEX;
2242                 else
2243                         val &= ~TX_CTRL_DUPLEX;
2244                 bw32(bp, B44_TX_CTRL, val);
2245                 phy_print_status(phydev);
2246         }
2247 }
2248
/* Register an MDIO bus for the external PHY and connect to it via
 * phylib.  If no PHY responds at the SPROM-provided address but the
 * board flags indicate an on-board switch (ROBO/ADM), fall back to a
 * fixed PHY.  Returns 0 on success or a negative errno; on failure
 * the MDIO bus is fully unwound.
 */
static int b44_register_phy_one(struct b44 *bp)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mii_bus *mii_bus;
	struct ssb_device *sdev = bp->sdev;
	struct phy_device *phydev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	struct ssb_sprom *sprom = &sdev->bus->sprom;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus) {
		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	mii_bus->priv = bp;
	mii_bus->read = b44_mdio_read_phylib;
	mii_bus->write = b44_mdio_write_phylib;
	mii_bus->name = "b44_eth_mii";
	mii_bus->parent = sdev->dev;
	/* Only probe the single address the SPROM told us about. */
	mii_bus->phy_mask = ~(1 << bp->phy_addr);
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);

	bp->mii_bus = mii_bus;

	err = mdiobus_register(mii_bus);
	if (err) {
		dev_err(sdev->dev, "failed to register MII bus\n");
		goto err_out_mdiobus;
	}

	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {

		dev_info(sdev->dev,
			 "could not find PHY at %i, use fixed one\n",
			 bp->phy_addr);

		bp->phy_addr = 0;
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
			 bp->phy_addr);
	} else {
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
			 bp->phy_addr);
	}

	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(sdev->dev, "could not attach PHY at %i\n",
			bp->phy_addr);
		err = PTR_ERR(phydev);
		goto err_out_mdiobus_unregister;
	}

	/* mask with MAC supported features */
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	bp->old_link = 0;
	bp->phy_addr = phydev->mdio.addr;

	phy_attached_info(phydev);

	return 0;

err_out_mdiobus_unregister:
	mdiobus_unregister(mii_bus);

err_out_mdiobus:
	mdiobus_free(mii_bus);

err_out:
	return err;
}
2330
2331 static void b44_unregister_phy_one(struct b44 *bp)
2332 {
2333         struct net_device *dev = bp->dev;
2334         struct mii_bus *mii_bus = bp->mii_bus;
2335
2336         phy_disconnect(dev->phydev);
2337         mdiobus_unregister(mii_bus);
2338         mdiobus_free(mii_bus);
2339 }
2340
2341 static int b44_init_one(struct ssb_device *sdev,
2342                         const struct ssb_device_id *ent)
2343 {
2344         struct net_device *dev;
2345         struct b44 *bp;
2346         int err;
2347
2348         instance++;
2349
2350         pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
2351
2352         dev = alloc_etherdev(sizeof(*bp));
2353         if (!dev) {
2354                 err = -ENOMEM;
2355                 goto out;
2356         }
2357
2358         SET_NETDEV_DEV(dev, sdev->dev);
2359
2360         /* No interesting netdevice features in this card... */
2361         dev->features |= 0;
2362
2363         bp = netdev_priv(dev);
2364         bp->sdev = sdev;
2365         bp->dev = dev;
2366         bp->force_copybreak = 0;
2367
2368         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2369
2370         spin_lock_init(&bp->lock);
2371         u64_stats_init(&bp->hw_stats.syncp);
2372
2373         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2374         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2375
2376         dev->netdev_ops = &b44_netdev_ops;
2377         netif_napi_add(dev, &bp->napi, b44_poll, 64);
2378         dev->watchdog_timeo = B44_TX_TIMEOUT;
2379         dev->min_mtu = B44_MIN_MTU;
2380         dev->max_mtu = B44_MAX_MTU;
2381         dev->irq = sdev->irq;
2382         dev->ethtool_ops = &b44_ethtool_ops;
2383
2384         err = ssb_bus_powerup(sdev->bus, 0);
2385         if (err) {
2386                 dev_err(sdev->dev,
2387                         "Failed to powerup the bus\n");
2388                 goto err_out_free_dev;
2389         }
2390
2391         if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
2392                 dev_err(sdev->dev,
2393                         "Required 30BIT DMA mask unsupported by the system\n");
2394                 goto err_out_powerdown;
2395         }
2396
2397         err = b44_get_invariants(bp);
2398         if (err) {
2399                 dev_err(sdev->dev,
2400                         "Problem fetching invariants of chip, aborting\n");
2401                 goto err_out_powerdown;
2402         }
2403
2404         if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2405                 dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
2406                 err = -ENODEV;
2407                 goto err_out_powerdown;
2408         }
2409
2410         bp->mii_if.dev = dev;
2411         bp->mii_if.mdio_read = b44_mdio_read_mii;
2412         bp->mii_if.mdio_write = b44_mdio_write_mii;
2413         bp->mii_if.phy_id = bp->phy_addr;
2414         bp->mii_if.phy_id_mask = 0x1f;
2415         bp->mii_if.reg_num_mask = 0x1f;
2416
2417         /* By default, advertise all speed/duplex settings. */
2418         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2419                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2420
2421         /* By default, auto-negotiate PAUSE. */
2422         bp->flags |= B44_FLAG_PAUSE_AUTO;
2423
2424         err = register_netdev(dev);
2425         if (err) {
2426                 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2427                 goto err_out_powerdown;
2428         }
2429
2430         netif_carrier_off(dev);
2431
2432         ssb_set_drvdata(sdev, dev);
2433
2434         /* Chip reset provides power to the b44 MAC & PCI cores, which
2435          * is necessary for MAC register access.
2436          */
2437         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2438
2439         /* do a phy reset to test if there is an active phy */
2440         err = b44_phy_reset(bp);
2441         if (err < 0) {
2442                 dev_err(sdev->dev, "phy reset failed\n");
2443                 goto err_out_unregister_netdev;
2444         }
2445
2446         if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2447                 err = b44_register_phy_one(bp);
2448                 if (err) {
2449                         dev_err(sdev->dev, "Cannot register PHY, aborting\n");
2450                         goto err_out_unregister_netdev;
2451                 }
2452         }
2453
2454         device_set_wakeup_capable(sdev->dev, true);
2455         netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2456
2457         return 0;
2458
2459 err_out_unregister_netdev:
2460         unregister_netdev(dev);
2461 err_out_powerdown:
2462         ssb_bus_may_powerdown(sdev->bus);
2463
2464 err_out_free_dev:
2465         netif_napi_del(&bp->napi);
2466         free_netdev(dev);
2467
2468 out:
2469         return err;
2470 }
2471
/* SSB remove hook: tear down in the reverse order of probe.  The
 * netdev is unregistered first so no new I/O can arrive while the
 * PHY, SSB core and netdev memory are released.
 */
static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
2487
/* SSB suspend hook: if the interface is up, stop the timer, halt the
 * MAC and free the DMA rings under the lock, release the IRQ, arm
 * wake-on-LAN if enabled, and drop the device into D3hot.
 */
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	/* WoL needs the chip partially alive to see magic packets. */
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
2516
/* SSB resume hook: power the bus back up and, if the interface was
 * running, rebuild the rings, re-init the hardware, re-request the
 * IRQ and restart the queue/timer.  Returns 0 or a negative errno.
 */
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		/* Undo the hardware init done above. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
2562
/* SSB bus glue: probe/remove and power-management entry points. */
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
2571
2572 static inline int __init b44_pci_init(void)
2573 {
2574         int err = 0;
2575 #ifdef CONFIG_B44_PCI
2576         err = ssb_pcihost_register(&b44_pci_driver);
2577 #endif
2578         return err;
2579 }
2580
/* Unregister the SSB PCI-host driver; no-op without PCI support. */
static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
2587
/* Module init: compute the DMA descriptor sync size from the cache
 * alignment, then register the PCI host glue and the SSB driver.
 * The PCI registration is rolled back if SSB registration fails.
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
2604
/* Module exit: unregister in the reverse order of b44_init(). */
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}
2610
/* Module entry/exit registration. */
module_init(b44_init);
module_exit(b44_cleanup);
2613