]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/net/usb/lan78xx.c
net: hns: add phy_attached_info() to the hns driver
[linux.git] / drivers / net / usb / lan78xx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32
33 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME     "lan78xx"
36
37 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
38 #define THROTTLE_JIFFIES                (HZ / 8)
39 #define UNLINK_TIMEOUT_MS               3
40
41 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
42
43 #define SS_USB_PKT_SIZE                 (1024)
44 #define HS_USB_PKT_SIZE                 (512)
45 #define FS_USB_PKT_SIZE                 (64)
46
47 #define MAX_RX_FIFO_SIZE                (12 * 1024)
48 #define MAX_TX_FIFO_SIZE                (12 * 1024)
49 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
50 #define DEFAULT_BULK_IN_DELAY           (0x0800)
51 #define MAX_SINGLE_PACKET_SIZE          (9000)
52 #define DEFAULT_TX_CSUM_ENABLE          (true)
53 #define DEFAULT_RX_CSUM_ENABLE          (true)
54 #define DEFAULT_TSO_CSUM_ENABLE         (true)
55 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
56 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
57 #define TX_OVERHEAD                     (8)
58 #define RXW_PADDING                     2
59
60 #define LAN78XX_USB_VENDOR_ID           (0x0424)
61 #define LAN7800_USB_PRODUCT_ID          (0x7800)
62 #define LAN7850_USB_PRODUCT_ID          (0x7850)
63 #define LAN7801_USB_PRODUCT_ID          (0x7801)
64 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
65 #define LAN78XX_OTP_MAGIC               (0x78F3)
66
67 #define MII_READ                        1
68 #define MII_WRITE                       0
69
70 #define EEPROM_INDICATOR                (0xA5)
71 #define EEPROM_MAC_OFFSET               (0x01)
72 #define MAX_EEPROM_SIZE                 512
73 #define OTP_INDICATOR_1                 (0xF3)
74 #define OTP_INDICATOR_2                 (0xF7)
75
76 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
77                                          WAKE_MCAST | WAKE_BCAST | \
78                                          WAKE_ARP | WAKE_MAGIC)
79
80 /* USB related defines */
81 #define BULK_IN_PIPE                    1
82 #define BULK_OUT_PIPE                   2
83
84 /* default autosuspend delay (mSec)*/
85 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
86
87 /* statistic update interval (mSec) */
88 #define STAT_UPDATE_TIMER               (1 * 1000)
89
90 /* defines interrupts from interrupt EP */
91 #define MAX_INT_EP                      (32)
92 #define INT_EP_INTEP                    (31)
93 #define INT_EP_OTP_WR_DONE              (28)
94 #define INT_EP_EEE_TX_LPI_START         (26)
95 #define INT_EP_EEE_TX_LPI_STOP          (25)
96 #define INT_EP_EEE_RX_LPI               (24)
97 #define INT_EP_MAC_RESET_TIMEOUT        (23)
98 #define INT_EP_RDFO                     (22)
99 #define INT_EP_TXE                      (21)
100 #define INT_EP_USB_STATUS               (20)
101 #define INT_EP_TX_DIS                   (19)
102 #define INT_EP_RX_DIS                   (18)
103 #define INT_EP_PHY                      (17)
104 #define INT_EP_DP                       (16)
105 #define INT_EP_MAC_ERR                  (15)
106 #define INT_EP_TDFU                     (14)
107 #define INT_EP_TDFO                     (13)
108 #define INT_EP_UTX                      (12)
109 #define INT_EP_GPIO_11                  (11)
110 #define INT_EP_GPIO_10                  (10)
111 #define INT_EP_GPIO_9                   (9)
112 #define INT_EP_GPIO_8                   (8)
113 #define INT_EP_GPIO_7                   (7)
114 #define INT_EP_GPIO_6                   (6)
115 #define INT_EP_GPIO_5                   (5)
116 #define INT_EP_GPIO_4                   (4)
117 #define INT_EP_GPIO_3                   (3)
118 #define INT_EP_GPIO_2                   (2)
119 #define INT_EP_GPIO_1                   (1)
120 #define INT_EP_GPIO_0                   (0)
121
/* ethtool statistics names. The order of entries here MUST match the field
 * order of struct lan78xx_statstage / lan78xx_statstage64 below, since the
 * ethtool code exposes those structs as flat arrays indexed in parallel.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
171
/* Raw 32-bit statistics block as returned by the device's GET_STATS vendor
 * request (see lan78xx_read_stats()). Field order mirrors the hardware
 * layout and lan78xx_gstrings; the whole struct is accessed as a flat u32
 * array, so do not insert, remove or reorder members.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
221
/* 64-bit accumulated statistics, field-for-field parallel to struct
 * lan78xx_statstage. lan78xx_update_stats() rebuilds each member as the
 * current 32-bit hardware value plus (rollover_count * (rollover_max + 1)),
 * accessing this struct as a flat u64 array — keep the field order intact.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
271
/* Register offsets dumped for ethtool's get_regs interface.
 * NOTE(review): this table is read-only here; consider making it
 * `static const` if no writer exists elsewhere in the file.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
293
294 #define PHY_REG_SIZE (32 * sizeof(u32))
295
296 struct lan78xx_net;
297
/* Per-device private state for receive filtering, VLAN handling and
 * Wake-on-LAN configuration; hangs off lan78xx_net::driver_priv.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;				/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];	/* VLAN membership bitmap */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;	/* deferred multicast-filter update */
	struct work_struct set_vlan;		/* deferred VLAN-table update */
	u32 wol;				/* enabled WAKE_* wakeup flags */
};
310
/* Lifecycle states for an skb travelling through the URB queues; stored in
 * skb_data::state (i.e. in skb->cb).
 */
enum skb_state {
	illegal = 0,	/* not on any queue / state not yet assigned */
	tx_start,	/* TX URB submitted */
	tx_done,	/* TX URB completed */
	rx_start,	/* RX URB submitted */
	rx_done,	/* RX URB completed, awaiting processing */
	rx_cleanup,	/* RX buffer being torn down */
	unlink_start	/* URB unlink in progress */
};
320
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB this skb is attached to */
	struct lan78xx_net *dev;
	enum skb_state state;	/* current position in the TX/RX lifecycle */
	size_t length;		/* payload length for accounting */
	int num_of_packet;	/* frames aggregated in this transfer */
};
328
/* Context attached to asynchronous control URBs: the request being sent
 * plus the owning device.
 */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
333
334 #define EVENT_TX_HALT                   0
335 #define EVENT_RX_HALT                   1
336 #define EVENT_RX_MEMORY                 2
337 #define EVENT_STS_SPLIT                 3
338 #define EVENT_LINK_RESET                4
339 #define EVENT_RX_PAUSED                 5
340 #define EVENT_DEV_WAKING                6
341 #define EVENT_DEV_ASLEEP                7
342 #define EVENT_DEV_OPEN                  8
343 #define EVENT_STAT_UPDATE               9
344
/* Statistics bookkeeping: the last raw snapshot, per-counter rollover
 * counts/maxima, and the reconstructed 64-bit running totals.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* previous hardware snapshot */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* max value per counter before wrap */
	struct lan78xx_statstage64	curr_stat;	/* accumulated 64-bit totals */
};
352
/* State for the driver's internal IRQ domain, used to deliver PHY
 * interrupts signalled through the USB interrupt endpoint.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* virq handed to the PHY driver */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* mask of currently enabled INT_EP_* bits */
	struct mutex		irq_lock;		/* for irq bus access */
};
361
/* Main per-device structure tying together the netdev, the USB device,
 * the transfer queues and all driver state.
 */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* points to struct lan78xx_priv */

	int			rx_qlen;	/* target number of queued RX URBs */
	int			tx_qlen;	/* target number of queued TX URBs */
	struct sk_buff_head	rxq;		/* RX URBs in flight */
	struct sk_buff_head	txq;		/* TX URBs in flight */
	struct sk_buff_head	done;		/* completed URBs awaiting bh processing */
	struct sk_buff_head	rxq_pause;	/* RX skbs held while flow is paused */
	struct sk_buff_head	txq_pend;	/* TX skbs waiting for a free URB */

	struct tasklet_struct	bh;		/* bottom half for URB completion */
	struct delayed_work	wq;		/* deferred event work (EVENT_* flags) */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;	/* writes deferred while suspended */

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;	/* bulk-out max packet size */
	struct timer_list	delay;		/* throttled-resubmit timer */
	struct timer_list	stat_monitor;	/* periodic statistics refresh */

	unsigned long		data[5];

	int			link_on;	/* last observed link state */
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV chip id (7800/7850/7801) */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow control via autonegotiation */
	u8			fc_request_control;

	int			delta;		/* ticks until next stat_monitor run */
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
421
422 /* define external phy id */
423 #define PHY_LAN8835                     (0x0007C130)
424 #define PHY_KSZ9031RNX                  (0x00221620)
425
426 /* use ethtool to change the level for any given device */
427 static int msg_level = -1;
428 module_param(msg_level, int, 0);
429 MODULE_PARM_DESC(msg_level, "Override default message level");
430
431 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
432 {
433         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
434         int ret;
435
436         if (!buf)
437                 return -ENOMEM;
438
439         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
440                               USB_VENDOR_REQUEST_READ_REGISTER,
441                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
442                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
443         if (likely(ret >= 0)) {
444                 le32_to_cpus(buf);
445                 *data = *buf;
446         } else {
447                 netdev_warn(dev->net,
448                             "Failed to read register index 0x%08x. ret = %d",
449                             index, ret);
450         }
451
452         kfree(buf);
453
454         return ret;
455 }
456
457 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
458 {
459         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
460         int ret;
461
462         if (!buf)
463                 return -ENOMEM;
464
465         *buf = data;
466         cpu_to_le32s(buf);
467
468         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
469                               USB_VENDOR_REQUEST_WRITE_REGISTER,
470                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
471                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
472         if (unlikely(ret < 0)) {
473                 netdev_warn(dev->net,
474                             "Failed to write register index 0x%08x. ret = %d",
475                             index, ret);
476         }
477
478         kfree(buf);
479
480         return ret;
481 }
482
483 static int lan78xx_read_stats(struct lan78xx_net *dev,
484                               struct lan78xx_statstage *data)
485 {
486         int ret = 0;
487         int i;
488         struct lan78xx_statstage *stats;
489         u32 *src;
490         u32 *dst;
491
492         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
493         if (!stats)
494                 return -ENOMEM;
495
496         ret = usb_control_msg(dev->udev,
497                               usb_rcvctrlpipe(dev->udev, 0),
498                               USB_VENDOR_REQUEST_GET_STATS,
499                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
500                               0,
501                               0,
502                               (void *)stats,
503                               sizeof(*stats),
504                               USB_CTRL_SET_TIMEOUT);
505         if (likely(ret >= 0)) {
506                 src = (u32 *)stats;
507                 dst = (u32 *)data;
508                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
509                         le32_to_cpus(&src[i]);
510                         dst[i] = src[i];
511                 }
512         } else {
513                 netdev_warn(dev->net,
514                             "Failed to read stat ret = 0x%x", ret);
515         }
516
517         kfree(stats);
518
519         return ret;
520 }
521
/* Bump the rollover count for @member when the freshly read hardware value
 * is below the previously saved snapshot, i.e. the 32-bit counter wrapped.
 *
 * Fix: wrap the body in do { } while (0) — the old bare-block form expands
 * unsafely inside an unbraced if/else — and parenthesize the arguments.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
526
/* Compare a fresh hardware snapshot against the saved one, bumping the
 * per-counter rollover counts for every counter that wrapped, then save
 * @stats as the new reference snapshot. Called with stats.access_lock held
 * (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this snapshot for the next rollover comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
580
581 static void lan78xx_update_stats(struct lan78xx_net *dev)
582 {
583         u32 *p, *count, *max;
584         u64 *data;
585         int i;
586         struct lan78xx_statstage lan78xx_stats;
587
588         if (usb_autopm_get_interface(dev->intf) < 0)
589                 return;
590
591         p = (u32 *)&lan78xx_stats;
592         count = (u32 *)&dev->stats.rollover_count;
593         max = (u32 *)&dev->stats.rollover_max;
594         data = (u64 *)&dev->stats.curr_stat;
595
596         mutex_lock(&dev->stats.access_lock);
597
598         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
599                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
600
601         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
602                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
603
604         mutex_unlock(&dev->stats.access_lock);
605
606         usb_autopm_put_interface(dev->intf);
607 }
608
609 /* Loop until the read is completed with timeout called with phy_mutex held */
610 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
611 {
612         unsigned long start_time = jiffies;
613         u32 val;
614         int ret;
615
616         do {
617                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
618                 if (unlikely(ret < 0))
619                         return -EIO;
620
621                 if (!(val & MII_ACC_MII_BUSY_))
622                         return 0;
623         } while (!time_after(jiffies, start_time + HZ));
624
625         return -EIO;
626 }
627
628 static inline u32 mii_access(int id, int index, int read)
629 {
630         u32 ret;
631
632         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
633         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
634         if (read)
635                 ret |= MII_ACC_MII_READ_;
636         else
637                 ret |= MII_ACC_MII_WRITE_;
638         ret |= MII_ACC_MII_BUSY_;
639
640         return ret;
641 }
642
643 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
644 {
645         unsigned long start_time = jiffies;
646         u32 val;
647         int ret;
648
649         do {
650                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
651                 if (unlikely(ret < 0))
652                         return -EIO;
653
654                 if (!(val & E2P_CMD_EPC_BUSY_) ||
655                     (val & E2P_CMD_EPC_TIMEOUT_))
656                         break;
657                 usleep_range(40, 100);
658         } while (!time_after(jiffies, start_time + HZ));
659
660         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
661                 netdev_warn(dev->net, "EEPROM read operation timeout");
662                 return -EIO;
663         }
664
665         return 0;
666 }
667
668 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
669 {
670         unsigned long start_time = jiffies;
671         u32 val;
672         int ret;
673
674         do {
675                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
676                 if (unlikely(ret < 0))
677                         return -EIO;
678
679                 if (!(val & E2P_CMD_EPC_BUSY_))
680                         return 0;
681
682                 usleep_range(40, 100);
683         } while (!time_after(jiffies, start_time + HZ));
684
685         netdev_warn(dev->net, "EEPROM is busy");
686         return -EIO;
687 }
688
689 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
690                                    u32 length, u8 *data)
691 {
692         u32 val;
693         u32 saved;
694         int i, ret;
695         int retval;
696
697         /* depends on chip, some EEPROM pins are muxed with LED function.
698          * disable & restore LED function to access EEPROM.
699          */
700         ret = lan78xx_read_reg(dev, HW_CFG, &val);
701         saved = val;
702         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
703                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
704                 ret = lan78xx_write_reg(dev, HW_CFG, val);
705         }
706
707         retval = lan78xx_eeprom_confirm_not_busy(dev);
708         if (retval)
709                 return retval;
710
711         for (i = 0; i < length; i++) {
712                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
713                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
714                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
715                 if (unlikely(ret < 0)) {
716                         retval = -EIO;
717                         goto exit;
718                 }
719
720                 retval = lan78xx_wait_eeprom(dev);
721                 if (retval < 0)
722                         goto exit;
723
724                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
725                 if (unlikely(ret < 0)) {
726                         retval = -EIO;
727                         goto exit;
728                 }
729
730                 data[i] = val & 0xFF;
731                 offset++;
732         }
733
734         retval = 0;
735 exit:
736         if (dev->chipid == ID_REV_CHIP_ID_7800_)
737                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
738
739         return retval;
740 }
741
742 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
743                                u32 length, u8 *data)
744 {
745         u8 sig;
746         int ret;
747
748         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
749         if ((ret == 0) && (sig == EEPROM_INDICATOR))
750                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
751         else
752                 ret = -EINVAL;
753
754         return ret;
755 }
756
/* Write @length bytes to the EEPROM starting at @offset: issue a
 * write-enable (EWEN) command, then one data-register fill plus write
 * command per byte, waiting for completion between steps. On LAN7800 the
 * LED function shares pins with the EEPROM and is disabled/restored
 * around the access. Returns 0 on success or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved on entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
823
/* Read @length bytes from one-time-programmable memory at @offset.
 * Powers the OTP block up if needed (clearing OTP_PWR_DN and waiting for
 * it to take effect), then performs one addressed READ command per byte,
 * polling OTP_STATUS between commands. Returns 0 on success, -EIO on a
 * power-up or status-poll timeout.
 * NOTE(review): intermediate register read/write return codes (ret) are
 * not checked here — errors surface only via the timeout paths.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split: high bits in ADDR1, low bits in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* select READ and kick off the command */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
877
878 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
879                                  u32 length, u8 *data)
880 {
881         int i;
882         int ret;
883         u32 buf;
884         unsigned long timeout;
885
886         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
887
888         if (buf & OTP_PWR_DN_PWRDN_N_) {
889                 /* clear it and wait to be cleared */
890                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
891
892                 timeout = jiffies + HZ;
893                 do {
894                         udelay(1);
895                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
896                         if (time_after(jiffies, timeout)) {
897                                 netdev_warn(dev->net,
898                                             "timeout on OTP_PWR_DN completion");
899                                 return -EIO;
900                         }
901                 } while (buf & OTP_PWR_DN_PWRDN_N_);
902         }
903
904         /* set to BYTE program mode */
905         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
906
907         for (i = 0; i < length; i++) {
908                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
909                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
910                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
911                                         ((offset + i) & OTP_ADDR2_10_3));
912                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
913                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
914                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
915
916                 timeout = jiffies + HZ;
917                 do {
918                         udelay(1);
919                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
920                         if (time_after(jiffies, timeout)) {
921                                 netdev_warn(dev->net,
922                                             "Timeout on OTP_STATUS completion");
923                                 return -EIO;
924                         }
925                 } while (buf & OTP_STATUS_BUSY_);
926         }
927
928         return 0;
929 }
930
931 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
932                             u32 length, u8 *data)
933 {
934         u8 sig;
935         int ret;
936
937         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
938
939         if (ret == 0) {
940                 if (sig == OTP_INDICATOR_2)
941                         offset += 0x100;
942                 else if (sig != OTP_INDICATOR_1)
943                         ret = -EINVAL;
944                 if (!ret)
945                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
946         }
947
948         return ret;
949 }
950
951 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
952 {
953         int i, ret;
954
955         for (i = 0; i < 100; i++) {
956                 u32 dp_sel;
957
958                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
959                 if (unlikely(ret < 0))
960                         return -EIO;
961
962                 if (dp_sel & DP_SEL_DPRDY_)
963                         return 0;
964
965                 usleep_range(40, 100);
966         }
967
968         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
969
970         return -EIO;
971 }
972
973 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
974                                   u32 addr, u32 length, u32 *buf)
975 {
976         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
977         u32 dp_sel;
978         int i, ret;
979
980         if (usb_autopm_get_interface(dev->intf) < 0)
981                         return 0;
982
983         mutex_lock(&pdata->dataport_mutex);
984
985         ret = lan78xx_dataport_wait_not_busy(dev);
986         if (ret < 0)
987                 goto done;
988
989         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
990
991         dp_sel &= ~DP_SEL_RSEL_MASK_;
992         dp_sel |= ram_select;
993         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
994
995         for (i = 0; i < length; i++) {
996                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
997
998                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
999
1000                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1001
1002                 ret = lan78xx_dataport_wait_not_busy(dev);
1003                 if (ret < 0)
1004                         goto done;
1005         }
1006
1007 done:
1008         mutex_unlock(&pdata->dataport_mutex);
1009         usb_autopm_put_interface(dev->intf);
1010
1011         return ret;
1012 }
1013
1014 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1015                                     int index, u8 addr[ETH_ALEN])
1016 {
1017         u32 temp;
1018
1019         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1020                 temp = addr[3];
1021                 temp = addr[2] | (temp << 8);
1022                 temp = addr[1] | (temp << 8);
1023                 temp = addr[0] | (temp << 8);
1024                 pdata->pfilter_table[index][1] = temp;
1025                 temp = addr[5];
1026                 temp = addr[4] | (temp << 8);
1027                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1028                 pdata->pfilter_table[index][0] = temp;
1029         }
1030 }
1031
1032 /* returns hash bit number for given MAC address */
1033 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1034 {
1035         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1036 }
1037
/* Worker that pushes the receive-filter state cached in lan78xx_priv
 * out to the chip.  Runs in process context because USB register access
 * sleeps; lan78xx_set_multicast() schedules it.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* load the multicast hash table into chip RAM */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* program perfect filters 1..N; slot 0 (own MAC) is untouched.
	 * MAF_HI is written to 0 first, presumably so the entry is not
	 * valid while its low half is being updated -- confirm against
	 * the datasheet.
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	/* finally switch the receive-filter engine to the new config */
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1062
1063 static void lan78xx_set_multicast(struct net_device *netdev)
1064 {
1065         struct lan78xx_net *dev = netdev_priv(netdev);
1066         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1067         unsigned long flags;
1068         int i;
1069
1070         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1071
1072         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1073                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1074
1075         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1076                         pdata->mchash_table[i] = 0;
1077         /* pfilter_table[0] has own HW address */
1078         for (i = 1; i < NUM_OF_MAF; i++) {
1079                         pdata->pfilter_table[i][0] =
1080                         pdata->pfilter_table[i][1] = 0;
1081         }
1082
1083         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1084
1085         if (dev->net->flags & IFF_PROMISC) {
1086                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1087                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1088         } else {
1089                 if (dev->net->flags & IFF_ALLMULTI) {
1090                         netif_dbg(dev, drv, dev->net,
1091                                   "receive all multicast enabled");
1092                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1093                 }
1094         }
1095
1096         if (netdev_mc_count(dev->net)) {
1097                 struct netdev_hw_addr *ha;
1098                 int i;
1099
1100                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1101
1102                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1103
1104                 i = 1;
1105                 netdev_for_each_mc_addr(ha, netdev) {
1106                         /* set first 32 into Perfect Filter */
1107                         if (i < 33) {
1108                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1109                         } else {
1110                                 u32 bitnum = lan78xx_hash(ha->addr);
1111
1112                                 pdata->mchash_table[bitnum / 32] |=
1113                                                         (1 << (bitnum % 32));
1114                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1115                         }
1116                         i++;
1117                 }
1118         }
1119
1120         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1121
1122         /* defer register writes to a sleepable context */
1123         schedule_work(&pdata->set_multicast);
1124 }
1125
/* Program MAC flow control (FLOW) and FIFO thresholds (FCT_FLOW) from
 * the resolved pause capabilities.  With fc_autoneg, the capability is
 * resolved from local/partner advertisements; otherwise the user's
 * requested control is used.  @duplex is currently unused here.
 * NOTE(review): register write results are ignored; always returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		/* enable TX pause with maximum pause time (0xFFFF) */
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO thresholds per USB bus speed; values appear to be
	 * vendor-tuned magic -- confirm against the datasheet
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1160
/* Handle a PHY link change signalled via the chip's interrupt status.
 *
 * On link down: reset the MAC and stop the statistics timer.  On link
 * up: tune USB3 link-power states (U1/U2) for the negotiated speed,
 * re-resolve flow control from the MII advertisement registers, restart
 * the statistics timer and kick the RX/TX bottom half.
 * Returns a negative error code on failure.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	/* refresh phydev->link and speed/duplex state */
	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no link: statistics collection is pointless */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* presumably U2 exit latency is too high for gigabit
		 * line rate, hence U1-only at 1000 Mbps -- confirm
		 */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		/* read local and partner advertisements to resolve pause */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1238
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.      hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	/* mark the event pending before kicking the shared work item;
	 * if the work was already queued, the flag is still set, so the
	 * message below is presumably informational only -- confirm the
	 * worker re-scans dev->flags
	 */
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1250
1251 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1252 {
1253         u32 intdata;
1254
1255         if (urb->actual_length != 4) {
1256                 netdev_warn(dev->net,
1257                             "unexpected urb length %d", urb->actual_length);
1258                 return;
1259         }
1260
1261         intdata = get_unaligned_le32(urb->transfer_buffer);
1262
1263         if (intdata & INT_ENP_PHY_INT) {
1264                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1265                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1266
1267                 if (dev->domain_data.phyirq > 0)
1268                         generic_handle_irq(dev->domain_data.phyirq);
1269         } else
1270                 netdev_warn(dev->net,
1271                             "unexpected interrupt: 0x%08x\n", intdata);
1272 }
1273
1274 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1275 {
1276         return MAX_EEPROM_SIZE;
1277 }
1278
1279 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1280                                       struct ethtool_eeprom *ee, u8 *data)
1281 {
1282         struct lan78xx_net *dev = netdev_priv(netdev);
1283         int ret;
1284
1285         ret = usb_autopm_get_interface(dev->intf);
1286         if (ret)
1287                 return ret;
1288
1289         ee->magic = LAN78XX_EEPROM_MAGIC;
1290
1291         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1292
1293         usb_autopm_put_interface(dev->intf);
1294
1295         return ret;
1296 }
1297
1298 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1299                                       struct ethtool_eeprom *ee, u8 *data)
1300 {
1301         struct lan78xx_net *dev = netdev_priv(netdev);
1302         int ret;
1303
1304         ret = usb_autopm_get_interface(dev->intf);
1305         if (ret)
1306                 return ret;
1307
1308         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1309          * to load data from EEPROM
1310          */
1311         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1312                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1313         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1314                  (ee->offset == 0) &&
1315                  (ee->len == 512) &&
1316                  (data[0] == OTP_INDICATOR_1))
1317                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1318
1319         usb_autopm_put_interface(dev->intf);
1320
1321         return ret;
1322 }
1323
1324 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1325                                 u8 *data)
1326 {
1327         if (stringset == ETH_SS_STATS)
1328                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1329 }
1330
1331 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1332 {
1333         if (sset == ETH_SS_STATS)
1334                 return ARRAY_SIZE(lan78xx_gstrings);
1335         else
1336                 return -EOPNOTSUPP;
1337 }
1338
/* ethtool get_ethtool_stats: refresh hardware counters, then copy a
 * snapshot out under the stats lock.  The curr_stat layout presumably
 * matches lan78xx_gstrings one-to-one -- confirm when editing either.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	/* access_lock keeps the copy consistent vs. the stat updater */
	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1350
1351 static void lan78xx_get_wol(struct net_device *netdev,
1352                             struct ethtool_wolinfo *wol)
1353 {
1354         struct lan78xx_net *dev = netdev_priv(netdev);
1355         int ret;
1356         u32 buf;
1357         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1358
1359         if (usb_autopm_get_interface(dev->intf) < 0)
1360                         return;
1361
1362         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1363         if (unlikely(ret < 0)) {
1364                 wol->supported = 0;
1365                 wol->wolopts = 0;
1366         } else {
1367                 if (buf & USB_CFG_RMT_WKP_) {
1368                         wol->supported = WAKE_ALL;
1369                         wol->wolopts = pdata->wol;
1370                 } else {
1371                         wol->supported = 0;
1372                         wol->wolopts = 0;
1373                 }
1374         }
1375
1376         usb_autopm_put_interface(dev->intf);
1377 }
1378
1379 static int lan78xx_set_wol(struct net_device *netdev,
1380                            struct ethtool_wolinfo *wol)
1381 {
1382         struct lan78xx_net *dev = netdev_priv(netdev);
1383         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1384         int ret;
1385
1386         ret = usb_autopm_get_interface(dev->intf);
1387         if (ret < 0)
1388                 return ret;
1389
1390         if (wol->wolopts & ~WAKE_ALL)
1391                 return -EINVAL;
1392
1393         pdata->wol = wol->wolopts;
1394
1395         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1396
1397         phy_ethtool_set_wol(netdev->phydev, wol);
1398
1399         usb_autopm_put_interface(dev->intf);
1400
1401         return ret;
1402 }
1403
/* ethtool get_eee: combine the PHY's EEE report with the MAC's EEE
 * enable bit.  EEE is considered active only when enabled in MAC_CR and
 * local and partner advertisements share a mode.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* let phylib fill advertised/lp_advertised first */
	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active iff both sides advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1441
/* ethtool set_eee: toggle the MAC EEE enable bit; when enabling, also
 * push the advertisement to the PHY and program the LPI request delay
 * (same microsecond unit as tx_lpi_timer).
 * NOTE(review): register/PHY call results are ignored; always returns 0.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1471
1472 static u32 lan78xx_get_link(struct net_device *net)
1473 {
1474         phy_read_status(net->phydev);
1475
1476         return net->phydev->link;
1477 }
1478
1479 static void lan78xx_get_drvinfo(struct net_device *net,
1480                                 struct ethtool_drvinfo *info)
1481 {
1482         struct lan78xx_net *dev = netdev_priv(net);
1483
1484         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1485         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1486 }
1487
1488 static u32 lan78xx_get_msglevel(struct net_device *net)
1489 {
1490         struct lan78xx_net *dev = netdev_priv(net);
1491
1492         return dev->msg_enable;
1493 }
1494
1495 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1496 {
1497         struct lan78xx_net *dev = netdev_priv(net);
1498
1499         dev->msg_enable = level;
1500 }
1501
1502 static int lan78xx_get_link_ksettings(struct net_device *net,
1503                                       struct ethtool_link_ksettings *cmd)
1504 {
1505         struct lan78xx_net *dev = netdev_priv(net);
1506         struct phy_device *phydev = net->phydev;
1507         int ret;
1508
1509         ret = usb_autopm_get_interface(dev->intf);
1510         if (ret < 0)
1511                 return ret;
1512
1513         phy_ethtool_ksettings_get(phydev, cmd);
1514
1515         usb_autopm_put_interface(dev->intf);
1516
1517         return ret;
1518 }
1519
/* ethtool set_link_ksettings: hand speed/duplex/autoneg to phylib.
 * With autoneg off, the link is bounced by briefly setting BMCR
 * loopback -- presumably to force a link-down/up so the forced settings
 * take effect immediately; confirm against PHY datasheet.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1547
1548 static void lan78xx_get_pause(struct net_device *net,
1549                               struct ethtool_pauseparam *pause)
1550 {
1551         struct lan78xx_net *dev = netdev_priv(net);
1552         struct phy_device *phydev = net->phydev;
1553         struct ethtool_link_ksettings ecmd;
1554
1555         phy_ethtool_ksettings_get(phydev, &ecmd);
1556
1557         pause->autoneg = dev->fc_autoneg;
1558
1559         if (dev->fc_request_control & FLOW_CTRL_TX)
1560                 pause->tx_pause = 1;
1561
1562         if (dev->fc_request_control & FLOW_CTRL_RX)
1563                 pause->rx_pause = 1;
1564 }
1565
/* ethtool set_pauseparam: record the requested pause configuration.
 * With link autoneg enabled, the Pause/Asym-Pause advertisement bits
 * are rebuilt from the request and pushed to the PHY; otherwise the
 * cached request is applied on the next link change (see
 * lan78xx_update_flowcontrol()).
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg only makes sense with link autoneg enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* drop current pause bits, then re-add from the request */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1610
1611 static int lan78xx_get_regs_len(struct net_device *netdev)
1612 {
1613         if (!netdev->phydev)
1614                 return (sizeof(lan78xx_regs));
1615         else
1616                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1617 }
1618
1619 static void
1620 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1621                  void *buf)
1622 {
1623         u32 *data = buf;
1624         int i, j;
1625         struct lan78xx_net *dev = netdev_priv(netdev);
1626
1627         /* Read Device/MAC registers */
1628         for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1629                 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1630
1631         if (!netdev->phydev)
1632                 return;
1633
1634         /* Read PHY registers */
1635         for (j = 0; j < 32; i++, j++)
1636                 data[i] = phy_read(netdev->phydev, j);
1637 }
1638
/* ethtool entry points; link/pause/EEE queries are delegated to phylib
 * where possible, the rest access chip registers directly.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1662
1663 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1664 {
1665         if (!netif_running(netdev))
1666                 return -EINVAL;
1667
1668         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1669 }
1670
/* Determine and program the interface MAC address.
 *
 * Precedence: an address already present in RX_ADDRL/RX_ADDRH (e.g. set
 * by firmware), then platform data / Device Tree, then EEPROM or OTP,
 * and finally a random address.  The chosen address is also written to
 * perfect-filter slot 0 and copied into the net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* ADDRL carries bytes 0..3, ADDRH bytes 4..5, little-endian */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* write the chosen address back to the MAC */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* perfect-filter slot 0 always matches our own address */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1720
1721 /* MDIO read and write wrappers for phylib */
1722 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1723 {
1724         struct lan78xx_net *dev = bus->priv;
1725         u32 val, addr;
1726         int ret;
1727
1728         ret = usb_autopm_get_interface(dev->intf);
1729         if (ret < 0)
1730                 return ret;
1731
1732         mutex_lock(&dev->phy_mutex);
1733
1734         /* confirm MII not busy */
1735         ret = lan78xx_phy_wait_not_busy(dev);
1736         if (ret < 0)
1737                 goto done;
1738
1739         /* set the address, index & direction (read from PHY) */
1740         addr = mii_access(phy_id, idx, MII_READ);
1741         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1742
1743         ret = lan78xx_phy_wait_not_busy(dev);
1744         if (ret < 0)
1745                 goto done;
1746
1747         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1748
1749         ret = (int)(val & 0xFFFF);
1750
1751 done:
1752         mutex_unlock(&dev->phy_mutex);
1753         usb_autopm_put_interface(dev->intf);
1754
1755         return ret;
1756 }
1757
1758 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1759                                  u16 regval)
1760 {
1761         struct lan78xx_net *dev = bus->priv;
1762         u32 val, addr;
1763         int ret;
1764
1765         ret = usb_autopm_get_interface(dev->intf);
1766         if (ret < 0)
1767                 return ret;
1768
1769         mutex_lock(&dev->phy_mutex);
1770
1771         /* confirm MII not busy */
1772         ret = lan78xx_phy_wait_not_busy(dev);
1773         if (ret < 0)
1774                 goto done;
1775
1776         val = (u32)regval;
1777         ret = lan78xx_write_reg(dev, MII_DATA, val);
1778
1779         /* set the address, index & direction (write to PHY) */
1780         addr = mii_access(phy_id, idx, MII_WRITE);
1781         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1782
1783         ret = lan78xx_phy_wait_not_busy(dev);
1784         if (ret < 0)
1785                 goto done;
1786
1787 done:
1788         mutex_unlock(&dev->phy_mutex);
1789         usb_autopm_put_interface(dev->intf);
1790         return 0;
1791 }
1792
/* Allocate and register the driver's MDIO bus.
 *
 * Returns 0 on success or a negative errno; on registration failure the
 * allocated bus is freed before returning.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* unique bus id derived from the device's USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	/* phy_mask: a set bit SUPPRESSES probing of that PHY address */
	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	/* optional "mdio" DT child node describes the attached PHYs;
	 * of_mdiobus_register() accepts a NULL node
	 */
	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1838
/* Tear down the MDIO bus created by lan78xx_mdio_init().
 * Order matters: the bus must be unregistered before it is freed.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1844
/* phylib link-change callback.
 *
 * Only applies a hardware workaround for forced 100 Mbit/s operation;
 * the PHY interrupt is masked while the speed is bounced so the
 * intermediate transitions don't raise the IRQ pin.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
		/* NOTE(review): negative phy_read/phy_write results are
		 * not checked anywhere in this sequence.
		 */
	}
}
1876
/* irq_domain .map callback: attach a newly-created virq to the chip's
 * irqchip and flow handler stored in the domain's host_data.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	/* chip data must be in place before the handler can run */
	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1888
/* irq_domain .unmap callback: undo what irq_map() established. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1894
/* Domain ops for the chip's internal interrupt sources. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1899
1900 static void lan78xx_irq_mask(struct irq_data *irqd)
1901 {
1902         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1903
1904         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1905 }
1906
1907 static void lan78xx_irq_unmask(struct irq_data *irqd)
1908 {
1909         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1910
1911         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1912 }
1913
/* irqchip .irq_bus_lock: serialise irqenable updates.  The matching
 * device register write happens in irq_bus_sync_unlock(), the only
 * irqchip callback run in a sleepable context.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1920
1921 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1922 {
1923         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1924         struct lan78xx_net *dev =
1925                         container_of(data, struct lan78xx_net, domain_data);
1926         u32 buf;
1927         int ret;
1928
1929         /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1930          * are only two callbacks executed in non-atomic contex.
1931          */
1932         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1933         if (buf != data->irqenable)
1934                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1935
1936         mutex_unlock(&data->irq_lock);
1937 }
1938
/* irqchip for the chip's internal interrupt sources: mask/unmask only
 * touch the cached enable word; irq_bus_sync_unlock flushes it to the
 * device over USB.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1946
/* Create an IRQ domain for the chip's internal interrupt sources and
 * pre-map the PHY interrupt (INT_EP_PHY).
 *
 * On success dev->domain_data.phyirq holds the Linux irq number for the
 * PHY and 0 is returned; -EINVAL if the domain or mapping could not be
 * created.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable mask from the current register value;
	 * NOTE(review): the read result is not checked
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	/* irqdomain/irqmap are 0/NULL on failure, so the fields stay sane */
	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
1985
1986 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1987 {
1988         if (dev->domain_data.phyirq > 0) {
1989                 irq_dispose_mapping(dev->domain_data.phyirq);
1990
1991                 if (dev->domain_data.irqdomain)
1992                         irq_domain_remove(dev->domain_data.irqdomain);
1993         }
1994         dev->domain_data.phyirq = 0;
1995         dev->domain_data.irqdomain = NULL;
1996 }
1997
/* PHY fixup for a LAN8835 attached to a LAN7801: route the shared pin
 * to IRQ_N mode and enable/tune the MAC-side RGMII TX clock delay.
 * Returns 1 (presumably only negative fixup results are treated as
 * errors by phylib -- TODO confirm).
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	/* MAC adds the TX delay, so the connection mode is RGMII_TXID */
	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2021
/* PHY fixup for a Micrel KSZ9031RNX attached to a LAN7801: program the
 * RGMII pad-skew registers so the PHY supplies the RX clock delay.
 * Returns 1 (presumably only negative fixup results are treated as
 * errors by phylib -- TODO confirm).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	/* PHY adds the RX delay, so the connection mode is RGMII_RXID */
	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2038
/* Locate (or fabricate) the PHY for a LAN7801.
 *
 * If no PHY answers on the MDIO bus, a fixed 1 Gbit/s full-duplex PHY
 * is registered and the MAC's RGMII clocks/delays are configured
 * directly.  Otherwise fixups for the known external PHYs (KSZ9031RNX,
 * LAN8835) are registered so they run when the PHY is connected.
 *
 * Returns the PHY device, or NULL on failure.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC supplies the TX clock delay for the fixed link;
		 * NOTE(review): register access results are not checked
		 */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2093
/* Find, attach and configure the PHY for this device.
 *
 * Performs chip-specific PHY selection (LAN7801: external or fixed PHY
 * via lan7801_phy_init(); LAN7800/7850: internal PHY on the MDIO bus),
 * connects phylib with lan78xx_link_status_change() as the link handler,
 * removes unsupported 1000T-half, advertises pause frames, applies the
 * optional Device Tree LED configuration and starts autonegotiation.
 *
 * Returns 0 on success or -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo whatever lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		/* the number of "microchip,led-modes" entries decides how
		 * many LED outputs stay enabled; the mode values themselves
		 * are consumed by the PHY driver
		 */
		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2196
2197 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2198 {
2199         int ret = 0;
2200         u32 buf;
2201         bool rxenabled;
2202
2203         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2204
2205         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2206
2207         if (rxenabled) {
2208                 buf &= ~MAC_RX_RXEN_;
2209                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2210         }
2211
2212         /* add 4 to size for FCS */
2213         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2214         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2215
2216         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2217
2218         if (rxenabled) {
2219                 buf |= MAC_RX_RXEN_;
2220                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2221         }
2222
2223         return 0;
2224 }
2225
/* Asynchronously unlink every URB on @q that is not already being
 * unlinked.  Returns the number of URBs whose unlink was initiated.
 *
 * The queue lock must be dropped around usb_unlink_urb() (its completion
 * handler may run synchronously and take the same lock), so the walk
 * restarts from the queue head after each unlink; entries already in
 * unlink_start state are skipped to guarantee forward progress.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2270
/* net_device .ndo_change_mtu: update the device's maximum RX frame
 * length and the driver's derived RX URB sizing.
 *
 * Returns -EDOM when the link-layer MTU is an exact multiple of the USB
 * max packet size (which would force an extra zero-length packet per
 * transfer), otherwise 0.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	/* NOTE(review): the register-update result is ignored */
	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* in-flight RX URBs are too small now: unlink them
			 * and let the bottom half resubmit at the new size
			 */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2300
2301 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2302 {
2303         struct lan78xx_net *dev = netdev_priv(netdev);
2304         struct sockaddr *addr = p;
2305         u32 addr_lo, addr_hi;
2306         int ret;
2307
2308         if (netif_running(netdev))
2309                 return -EBUSY;
2310
2311         if (!is_valid_ether_addr(addr->sa_data))
2312                 return -EADDRNOTAVAIL;
2313
2314         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2315
2316         addr_lo = netdev->dev_addr[0] |
2317                   netdev->dev_addr[1] << 8 |
2318                   netdev->dev_addr[2] << 16 |
2319                   netdev->dev_addr[3] << 24;
2320         addr_hi = netdev->dev_addr[4] |
2321                   netdev->dev_addr[5] << 8;
2322
2323         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2324         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2325
2326         /* Added to support MAC address changes */
2327         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2328         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2329
2330         return 0;
2331 }
2332
2333 /* Enable or disable Rx checksum offload engine */
2334 static int lan78xx_set_features(struct net_device *netdev,
2335                                 netdev_features_t features)
2336 {
2337         struct lan78xx_net *dev = netdev_priv(netdev);
2338         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2339         unsigned long flags;
2340         int ret;
2341
2342         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2343
2344         if (features & NETIF_F_RXCSUM) {
2345                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2346                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2347         } else {
2348                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2349                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2350         }
2351
2352         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2353                 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2354         else
2355                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2356
2357         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2358                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2359         else
2360                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2361
2362         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2363
2364         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2365
2366         return 0;
2367 }
2368
/* Worker: push the in-memory VLAN filter bitmap to the device's VLAN
 * dataport table.  Register access sleeps, so the ndo vlan callbacks
 * defer to this work item instead of writing directly.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2378
2379 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2380                                    __be16 proto, u16 vid)
2381 {
2382         struct lan78xx_net *dev = netdev_priv(netdev);
2383         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2384         u16 vid_bit_index;
2385         u16 vid_dword_index;
2386
2387         vid_dword_index = (vid >> 5) & 0x7F;
2388         vid_bit_index = vid & 0x1F;
2389
2390         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2391
2392         /* defer register writes to a sleepable context */
2393         schedule_work(&pdata->set_vlan);
2394
2395         return 0;
2396 }
2397
2398 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2399                                     __be16 proto, u16 vid)
2400 {
2401         struct lan78xx_net *dev = netdev_priv(netdev);
2402         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2403         u16 vid_bit_index;
2404         u16 vid_dword_index;
2405
2406         vid_dword_index = (vid >> 5) & 0x7F;
2407         vid_bit_index = vid & 0x1F;
2408
2409         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2410
2411         /* defer register writes to a sleepable context */
2412         schedule_work(&pdata->set_vlan);
2413
2414         return 0;
2415 }
2416
/* Initialise the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1, a 24-byte parameter block is looked up
 * first in EEPROM, then in OTP (offset 0x3F holds a length byte and a
 * word pointer).  On a failed raw read the function returns without
 * touching the registers; if no block is found the six LTM registers
 * are programmed with zeros.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	/* NOTE(review): if this read fails, buf is used uninitialized */
	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2455
2456 static int lan78xx_reset(struct lan78xx_net *dev)
2457 {
2458         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2459         u32 buf;
2460         int ret = 0;
2461         unsigned long timeout;
2462         u8 sig;
2463
2464         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2465         buf |= HW_CFG_LRST_;
2466         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2467
2468         timeout = jiffies + HZ;
2469         do {
2470                 mdelay(1);
2471                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2472                 if (time_after(jiffies, timeout)) {
2473                         netdev_warn(dev->net,
2474                                     "timeout on completion of LiteReset");
2475                         return -EIO;
2476                 }
2477         } while (buf & HW_CFG_LRST_);
2478
2479         lan78xx_init_mac_address(dev);
2480
2481         /* save DEVID for later usage */
2482         ret = lan78xx_read_reg(dev, ID_REV, &buf);
2483         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2484         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2485
2486         /* Respond to the IN token with a NAK */
2487         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2488         buf |= USB_CFG_BIR_;
2489         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2490
2491         /* Init LTM */
2492         lan78xx_init_ltm(dev);
2493
2494         if (dev->udev->speed == USB_SPEED_SUPER) {
2495                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2496                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2497                 dev->rx_qlen = 4;
2498                 dev->tx_qlen = 4;
2499         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2500                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2501                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2502                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2503                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2504         } else {
2505                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2506                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2507                 dev->rx_qlen = 4;
2508                 dev->tx_qlen = 4;
2509         }
2510
2511         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2512         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2513
2514         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2515         buf |= HW_CFG_MEF_;
2516         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2517
2518         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2519         buf |= USB_CFG_BCE_;
2520         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2521
2522         /* set FIFO sizes */
2523         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2524         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2525
2526         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2527         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2528
2529         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2530         ret = lan78xx_write_reg(dev, FLOW, 0);
2531         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2532
2533         /* Don't need rfe_ctl_lock during initialisation */
2534         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2535         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2536         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2537
2538         /* Enable or disable checksum offload engines */
2539         lan78xx_set_features(dev->net, dev->net->features);
2540
2541         lan78xx_set_multicast(dev->net);
2542
2543         /* reset PHY */
2544         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2545         buf |= PMT_CTL_PHY_RST_;
2546         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2547
2548         timeout = jiffies + HZ;
2549         do {
2550                 mdelay(1);
2551                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2552                 if (time_after(jiffies, timeout)) {
2553                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2554                         return -EIO;
2555                 }
2556         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2557
2558         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2559         /* LAN7801 only has RGMII mode */
2560         if (dev->chipid == ID_REV_CHIP_ID_7801_)
2561                 buf &= ~MAC_CR_GMII_EN_;
2562
2563         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2564                 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2565                 if (!ret && sig != EEPROM_INDICATOR) {
2566                         /* Implies there is no external eeprom. Set mac speed */
2567                         netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2568                         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2569                 }
2570         }
2571         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2572
2573         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2574         buf |= MAC_TX_TXEN_;
2575         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2576
2577         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2578         buf |= FCT_TX_CTL_EN_;
2579         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2580
2581         ret = lan78xx_set_rx_max_frame_length(dev,
2582                                               dev->net->mtu + VLAN_ETH_HLEN);
2583
2584         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2585         buf |= MAC_RX_RXEN_;
2586         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2587
2588         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2589         buf |= FCT_RX_CTL_EN_;
2590         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2591
2592         return 0;
2593 }
2594
2595 static void lan78xx_init_stats(struct lan78xx_net *dev)
2596 {
2597         u32 *p;
2598         int i;
2599
2600         /* initialize for stats update
2601          * some counters are 20bits and some are 32bits
2602          */
2603         p = (u32 *)&dev->stats.rollover_max;
2604         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2605                 p[i] = 0xFFFFF;
2606
2607         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2608         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2609         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2610         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2611         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2612         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2613         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2614         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2615         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2616         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2617
2618         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2619 }
2620
2621 static int lan78xx_open(struct net_device *net)
2622 {
2623         struct lan78xx_net *dev = netdev_priv(net);
2624         int ret;
2625
2626         ret = usb_autopm_get_interface(dev->intf);
2627         if (ret < 0)
2628                 goto out;
2629
2630         phy_start(net->phydev);
2631
2632         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2633
2634         /* for Link Check */
2635         if (dev->urb_intr) {
2636                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2637                 if (ret < 0) {
2638                         netif_err(dev, ifup, dev->net,
2639                                   "intr submit %d\n", ret);
2640                         goto done;
2641                 }
2642         }
2643
2644         lan78xx_init_stats(dev);
2645
2646         set_bit(EVENT_DEV_OPEN, &dev->flags);
2647
2648         netif_start_queue(net);
2649
2650         dev->link_on = false;
2651
2652         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2653 done:
2654         usb_autopm_put_interface(dev->intf);
2655
2656 out:
2657         return ret;
2658 }
2659
2660 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2661 {
2662         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2663         DECLARE_WAITQUEUE(wait, current);
2664         int temp;
2665
2666         /* ensure there are no more active urbs */
2667         add_wait_queue(&unlink_wakeup, &wait);
2668         set_current_state(TASK_UNINTERRUPTIBLE);
2669         dev->wait = &unlink_wakeup;
2670         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2671
2672         /* maybe wait for deletions to finish. */
2673         while (!skb_queue_empty(&dev->rxq) &&
2674                !skb_queue_empty(&dev->txq) &&
2675                !skb_queue_empty(&dev->done)) {
2676                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2677                 set_current_state(TASK_UNINTERRUPTIBLE);
2678                 netif_dbg(dev, ifdown, dev->net,
2679                           "waited for %d urb completions\n", temp);
2680         }
2681         set_current_state(TASK_RUNNING);
2682         dev->wait = NULL;
2683         remove_wait_queue(&unlink_wakeup, &wait);
2684 }
2685
/* ndo_stop: tear down in a fixed order — stats timer, PHY, queues,
 * in-flight URBs, then deferred work.  Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* Stop the periodic statistics refresh before tearing down. */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* Unlink all bulk URBs and wait for the queues to drain. */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* Frames parked while RX was paused are no longer deliverable. */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* Balances the autopm reference taken in lan78xx_open(). */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2722
/* Flatten @skb into a single contiguous buffer; returns 0 or -ENOMEM. */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int rc = skb_linearize(skb);

	return rc;
}
2727
2728 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2729                                        struct sk_buff *skb, gfp_t flags)
2730 {
2731         u32 tx_cmd_a, tx_cmd_b;
2732         void *ptr;
2733
2734         if (skb_cow_head(skb, TX_OVERHEAD)) {
2735                 dev_kfree_skb_any(skb);
2736                 return NULL;
2737         }
2738
2739         if (lan78xx_linearize(skb) < 0)
2740                 return NULL;
2741
2742         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2743
2744         if (skb->ip_summed == CHECKSUM_PARTIAL)
2745                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2746
2747         tx_cmd_b = 0;
2748         if (skb_is_gso(skb)) {
2749                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2750
2751                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2752
2753                 tx_cmd_a |= TX_CMD_A_LSO_;
2754         }
2755
2756         if (skb_vlan_tag_present(skb)) {
2757                 tx_cmd_a |= TX_CMD_A_IVTG_;
2758                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2759         }
2760
2761         ptr = skb_push(skb, 8);
2762         put_unaligned_le32(tx_cmd_a, ptr);
2763         put_unaligned_le32(tx_cmd_b, ptr + 4);
2764
2765         return skb;
2766 }
2767
/* Move @skb from @list onto dev->done, tagging it with @state, and kick
 * the bottom-half tasklet if the done queue was empty.  Returns the state
 * the skb held before the move.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	/* Hand-over-hand locking: list->lock is dropped WITHOUT restoring
	 * IRQs so the skb is never observable outside both queues with
	 * interrupts enabled; the saved flags are restored only when
	 * done.lock is released below.
	 */
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* Only the empty->non-empty transition needs to schedule the
	 * tasklet; one run drains the whole queue.
	 */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2790
2791 static void tx_complete(struct urb *urb)
2792 {
2793         struct sk_buff *skb = (struct sk_buff *)urb->context;
2794         struct skb_data *entry = (struct skb_data *)skb->cb;
2795         struct lan78xx_net *dev = entry->dev;
2796
2797         if (urb->status == 0) {
2798                 dev->net->stats.tx_packets += entry->num_of_packet;
2799                 dev->net->stats.tx_bytes += entry->length;
2800         } else {
2801                 dev->net->stats.tx_errors++;
2802
2803                 switch (urb->status) {
2804                 case -EPIPE:
2805                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2806                         break;
2807
2808                 /* software-driven interface shutdown */
2809                 case -ECONNRESET:
2810                 case -ESHUTDOWN:
2811                         break;
2812
2813                 case -EPROTO:
2814                 case -ETIME:
2815                 case -EILSEQ:
2816                         netif_stop_queue(dev->net);
2817                         break;
2818                 default:
2819                         netif_dbg(dev, tx_err, dev->net,
2820                                   "tx err %d\n", entry->urb->status);
2821                         break;
2822                 }
2823         }
2824
2825         usb_autopm_put_interface_async(dev->intf);
2826
2827         defer_bh(dev, skb, &dev->txq, tx_done);
2828 }
2829
2830 static void lan78xx_queue_skb(struct sk_buff_head *list,
2831                               struct sk_buff *newsk, enum skb_state state)
2832 {
2833         struct skb_data *entry = (struct skb_data *)newsk->cb;
2834
2835         __skb_queue_tail(list, newsk);
2836         entry->state = state;
2837 }
2838
2839 static netdev_tx_t
2840 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2841 {
2842         struct lan78xx_net *dev = netdev_priv(net);
2843         struct sk_buff *skb2 = NULL;
2844
2845         if (skb) {
2846                 skb_tx_timestamp(skb);
2847                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2848         }
2849
2850         if (skb2) {
2851                 skb_queue_tail(&dev->txq_pend, skb2);
2852
2853                 /* throttle TX patch at slower than SUPER SPEED USB */
2854                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2855                     (skb_queue_len(&dev->txq_pend) > 10))
2856                         netif_stop_queue(net);
2857         } else {
2858                 netif_dbg(dev, tx_err, dev->net,
2859                           "lan78xx_tx_prep return NULL\n");
2860                 dev->net->stats.tx_errors++;
2861                 dev->net->stats.tx_dropped++;
2862         }
2863
2864         tasklet_schedule(&dev->bh);
2865
2866         return NETDEV_TX_OK;
2867 }
2868
2869 static int
2870 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2871 {
2872         int tmp;
2873         struct usb_host_interface *alt = NULL;
2874         struct usb_host_endpoint *in = NULL, *out = NULL;
2875         struct usb_host_endpoint *status = NULL;
2876
2877         for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2878                 unsigned ep;
2879
2880                 in = NULL;
2881                 out = NULL;
2882                 status = NULL;
2883                 alt = intf->altsetting + tmp;
2884
2885                 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2886                         struct usb_host_endpoint *e;
2887                         int intr = 0;
2888
2889                         e = alt->endpoint + ep;
2890                         switch (e->desc.bmAttributes) {
2891                         case USB_ENDPOINT_XFER_INT:
2892                                 if (!usb_endpoint_dir_in(&e->desc))
2893                                         continue;
2894                                 intr = 1;
2895                                 /* FALLTHROUGH */
2896                         case USB_ENDPOINT_XFER_BULK:
2897                                 break;
2898                         default:
2899                                 continue;
2900                         }
2901                         if (usb_endpoint_dir_in(&e->desc)) {
2902                                 if (!intr && !in)
2903                                         in = e;
2904                                 else if (intr && !status)
2905                                         status = e;
2906                         } else {
2907                                 if (!out)
2908                                         out = e;
2909                         }
2910                 }
2911                 if (in && out)
2912                         break;
2913         }
2914         if (!alt || !in || !out)
2915                 return -EINVAL;
2916
2917         dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2918                                        in->desc.bEndpointAddress &
2919                                        USB_ENDPOINT_NUMBER_MASK);
2920         dev->pipe_out = usb_sndbulkpipe(dev->udev,
2921                                         out->desc.bEndpointAddress &
2922                                         USB_ENDPOINT_NUMBER_MASK);
2923         dev->ep_intr = status;
2924
2925         return 0;
2926 }
2927
2928 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2929 {
2930         struct lan78xx_priv *pdata = NULL;
2931         int ret;
2932         int i;
2933
2934         ret = lan78xx_get_endpoints(dev, intf);
2935         if (ret) {
2936                 netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
2937                             ret);
2938                 return ret;
2939         }
2940
2941         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2942
2943         pdata = (struct lan78xx_priv *)(dev->data[0]);
2944         if (!pdata) {
2945                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2946                 return -ENOMEM;
2947         }
2948
2949         pdata->dev = dev;
2950
2951         spin_lock_init(&pdata->rfe_ctl_lock);
2952         mutex_init(&pdata->dataport_mutex);
2953
2954         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2955
2956         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2957                 pdata->vlan_table[i] = 0;
2958
2959         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2960
2961         dev->net->features = 0;
2962
2963         if (DEFAULT_TX_CSUM_ENABLE)
2964                 dev->net->features |= NETIF_F_HW_CSUM;
2965
2966         if (DEFAULT_RX_CSUM_ENABLE)
2967                 dev->net->features |= NETIF_F_RXCSUM;
2968
2969         if (DEFAULT_TSO_CSUM_ENABLE)
2970                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2971
2972         if (DEFAULT_VLAN_RX_OFFLOAD)
2973                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2974
2975         if (DEFAULT_VLAN_FILTER_ENABLE)
2976                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2977
2978         dev->net->hw_features = dev->net->features;
2979
2980         ret = lan78xx_setup_irq_domain(dev);
2981         if (ret < 0) {
2982                 netdev_warn(dev->net,
2983                             "lan78xx_setup_irq_domain() failed : %d", ret);
2984                 goto out1;
2985         }
2986
2987         dev->net->hard_header_len += TX_OVERHEAD;
2988         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2989
2990         /* Init all registers */
2991         ret = lan78xx_reset(dev);
2992         if (ret) {
2993                 netdev_warn(dev->net, "Registers INIT FAILED....");
2994                 goto out2;
2995         }
2996
2997         ret = lan78xx_mdio_init(dev);
2998         if (ret) {
2999                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3000                 goto out2;
3001         }
3002
3003         dev->net->flags |= IFF_MULTICAST;
3004
3005         pdata->wol = WAKE_MAGIC;
3006
3007         return ret;
3008
3009 out2:
3010         lan78xx_remove_irq_domain(dev);
3011
3012 out1:
3013         netdev_warn(dev->net, "Bind routine FAILED");
3014         cancel_work_sync(&pdata->set_multicast);
3015         cancel_work_sync(&pdata->set_vlan);
3016         kfree(pdata);
3017         return ret;
3018 }
3019
3020 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3021 {
3022         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3023
3024         lan78xx_remove_irq_domain(dev);
3025
3026         lan78xx_remove_mdio(dev);
3027
3028         if (pdata) {
3029                 cancel_work_sync(&pdata->set_multicast);
3030                 cancel_work_sync(&pdata->set_vlan);
3031                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3032                 kfree(pdata);
3033                 pdata = NULL;
3034                 dev->data[0] = 0;
3035         }
3036 }
3037
3038 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3039                                     struct sk_buff *skb,
3040                                     u32 rx_cmd_a, u32 rx_cmd_b)
3041 {
3042         /* HW Checksum offload appears to be flawed if used when not stripping
3043          * VLAN headers. Drop back to S/W checksums under these conditions.
3044          */
3045         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3046             unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3047             ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3048              !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3049                 skb->ip_summed = CHECKSUM_NONE;
3050         } else {
3051                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3052                 skb->ip_summed = CHECKSUM_COMPLETE;
3053         }
3054 }
3055
3056 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3057                                     struct sk_buff *skb,
3058                                     u32 rx_cmd_a, u32 rx_cmd_b)
3059 {
3060         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3061             (rx_cmd_a & RX_CMD_A_FVTG_))
3062                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3063                                        (rx_cmd_b & 0xffff));
3064 }
3065
3066 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3067 {
3068         int status;
3069
3070         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3071                 skb_queue_tail(&dev->rxq_pause, skb);
3072                 return;
3073         }
3074
3075         dev->net->stats.rx_packets++;
3076         dev->net->stats.rx_bytes += skb->len;
3077
3078         skb->protocol = eth_type_trans(skb, dev->net);
3079
3080         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3081                   skb->len + sizeof(struct ethhdr), skb->protocol);
3082         memset(skb->cb, 0, sizeof(struct skb_data));
3083
3084         if (skb_defer_rx_timestamp(skb))
3085                 return;
3086
3087         status = netif_rx(skb);
3088         if (status != NET_RX_SUCCESS)
3089                 netif_dbg(dev, rx_err, dev->net,
3090                           "netif_rx status %d\n", status);
3091 }
3092
/* Parse one bulk-in buffer that may carry several frames.  Each frame is
 * preceded by a 10-byte command header (two LE32 words plus one LE16) and
 * padded to a 4-byte boundary.  Intermediate frames are carved out via
 * skb_clone() and delivered; the last frame stays in @skb for the caller.
 * Returns 1 on success, 0 on a framing/allocation error.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* strip the three little-endian RX command words */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive error flagged: skip this frame's payload */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				/* deliver in place: the caller passes the
				 * skb to lan78xx_skb_return() when len != 0
				 */
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* not the last frame: clone and point the clone at
			 * just this frame's payload
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3164
3165 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3166 {
3167         if (!lan78xx_rx(dev, skb)) {
3168                 dev->net->stats.rx_errors++;
3169                 goto done;
3170         }
3171
3172         if (skb->len) {
3173                 lan78xx_skb_return(dev, skb);
3174                 return;
3175         }
3176
3177         netif_dbg(dev, rx_err, dev->net, "drop\n");
3178         dev->net->stats.rx_errors++;
3179 done:
3180         skb_queue_tail(&dev->done, skb);
3181 }
3182
3183 static void rx_complete(struct urb *urb);
3184
/* Allocate a receive buffer and submit @urb on the bulk-in pipe.
 * Consumes @urb on every failure path; returns 0 on success or a
 * negative errno.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		/* no buffer: release the urb too, per the contract above */
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock makes the submit and the queueing of the skb atomic
	 * with respect to rx_complete() and unlink processing
	 */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: recover from task context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			/* transient failure: let the tasklet retry later */
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	/* on any failure both the skb and the urb are released here */
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3244
/* Bulk-in URB completion: classify the status, hand the skb to the bottom
 * half via defer_bh(), and resubmit the urb when it is still usable.
 * Ownership note: paths that set entry->urb = urb and urb = NULL park the
 * urb in the skb's control block so the cleanup path frees it instead.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but a runt buffer is still an error */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* endpoint stalled: recover from task context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		/* low-level USB errors: don't resubmit this urb */
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* urb is still ours here only when it wasn't parked above */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3314
/* Bottom-half TX path: drain dev->txq_pend into a single bulk-out URB.
 *
 * Non-GSO skbs are coalesced: as many pending packets as fit within
 * MAX_SINGLE_PACKET_SIZE are copied back-to-back (each padded to 32-bit
 * alignment) into one freshly allocated skb, which is submitted as one
 * URB.  A GSO skb is never coalesced: it is sent on its own, and only
 * after any packets queued ahead of it have been handled.
 *
 * Runs in tasklet context, hence GFP_ATOMIC allocations throughout.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* walk the pending queue under its lock to size the batch */
	spin_lock_irqsave(&tqp->lock, flags);
	skb_queue_walk(tqp, skb) {
		if (skb_is_gso(skb)) {
			if (!skb_queue_is_first(tqp, skb)) {
				/* handle previous packets first */
				break;
			}
			/* GSO skb goes out alone; length excludes the
			 * TX command words prepended by the xmit path.
			 */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* keep each packet 32-bit aligned within the batch */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* dequeue the packets counted above and pack them contiguously;
	 * leaves count == pkt_cnt for the bookkeeping below
	 */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	/* stash per-URB bookkeeping in the skb control buffer */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* endpoint stalled: defer a clear-halt to the workqueue */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		/* NOTE(review): on success the URB is already in flight; if
		 * tx_complete runs before this debug print, skb->protocol is
		 * read after the skb may have been consumed — confirm whether
		 * the done-queue deferral makes this safe.
		 */
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3442
3443 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3444 {
3445         struct urb *urb;
3446         int i;
3447
3448         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3449                 for (i = 0; i < 10; i++) {
3450                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3451                                 break;
3452                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3453                         if (urb)
3454                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3455                                         return;
3456                 }
3457
3458                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3459                         tasklet_schedule(&dev->bh);
3460         }
3461         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3462                 netif_wake_queue(dev->net);
3463 }
3464
3465 static void lan78xx_bh(unsigned long param)
3466 {
3467         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3468         struct sk_buff *skb;
3469         struct skb_data *entry;
3470
3471         while ((skb = skb_dequeue(&dev->done))) {
3472                 entry = (struct skb_data *)(skb->cb);
3473                 switch (entry->state) {
3474                 case rx_done:
3475                         entry->state = rx_cleanup;
3476                         rx_process(dev, skb);
3477                         continue;
3478                 case tx_done:
3479                         usb_free_urb(entry->urb);
3480                         dev_kfree_skb(skb);
3481                         continue;
3482                 case rx_cleanup:
3483                         usb_free_urb(entry->urb);
3484                         dev_kfree_skb(skb);
3485                         continue;
3486                 default:
3487                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3488                         return;
3489                 }
3490         }
3491
3492         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3493                 /* reset update timer delta */
3494                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3495                         dev->delta = 1;
3496                         mod_timer(&dev->stat_monitor,
3497                                   jiffies + STAT_UPDATE_TIMER);
3498                 }
3499
3500                 if (!skb_queue_empty(&dev->txq_pend))
3501                         lan78xx_tx_bh(dev);
3502
3503                 if (!timer_pending(&dev->delay) &&
3504                     !test_bit(EVENT_RX_HALT, &dev->flags))
3505                         lan78xx_rx_bh(dev);
3506         }
3507 }
3508
/* Deferred-event worker: services the EVENT_* bits raised by the URB
 * completion handlers — clearing TX/RX endpoint halts, re-running link
 * setup after a link-change interrupt, and refreshing statistics.
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels sit *inside*
 * if-bodies; the gotos taken when usb_autopm_get_interface() fails jump
 * straight to the error printout, bypassing the netif_msg_*() check and
 * the usb_clear_halt()/clear_bit() handling.  Confusing but apparently
 * deliberate — restructure only with care.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		/* cancel in-flight TX URBs before clearing the stall */
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			/* halt cleared (or device gone): resume TX */
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		/* cancel in-flight RX URBs before clearing the stall */
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			/* halt cleared: let the tasklet refill RX URBs */
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		/* NOTE(review): ret stays 0 here, so the "link reset
		 * failed (%d)" message always prints 0, not the real
		 * error from lan78xx_link_reset() — confirm intent.
		 */
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the stats poll interval exponentially, capped */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3586
3587 static void intr_complete(struct urb *urb)
3588 {
3589         struct lan78xx_net *dev = urb->context;
3590         int status = urb->status;
3591
3592         switch (status) {
3593         /* success */
3594         case 0:
3595                 lan78xx_status(dev, urb);
3596                 break;
3597
3598         /* software-driven interface shutdown */
3599         case -ENOENT:                   /* urb killed */
3600         case -ESHUTDOWN:                /* hardware gone */
3601                 netif_dbg(dev, ifdown, dev->net,
3602                           "intr shutdown, code %d\n", status);
3603                 return;
3604
3605         /* NOTE:  not throttling like RX/TX, since this endpoint
3606          * already polls infrequently
3607          */
3608         default:
3609                 netdev_dbg(dev->net, "intr status %d\n", status);
3610                 break;
3611         }
3612
3613         if (!netif_running(dev->net))
3614                 return;
3615
3616         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3617         status = usb_submit_urb(urb, GFP_ATOMIC);
3618         if (status != 0)
3619                 netif_err(dev, timer, dev->net,
3620                           "intr resubmit --> %d\n", status);
3621 }
3622
/* USB disconnect callback: tear down the PHY, netdev, deferred work and
 * URBs in reverse order of probe.  The sequence is order-sensitive:
 * unregister fixups and detach the PHY before unregister_netdev(), and
 * kill the interrupt URB only after lan78xx_unbind().
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	/* clear drvdata first so concurrent callbacks see the teardown */
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* NOTE(review): phydev is dereferenced after phy_disconnect();
	 * presumably safe because a fixed-link phydev is only freed by
	 * fixed_phy_unregister() below — confirm against phylib docs.
	 */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any TX URBs deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	/* balance the usb_get_dev() done in probe */
	usb_put_dev(udev);
}
3661
3662 static void lan78xx_tx_timeout(struct net_device *net)
3663 {
3664         struct lan78xx_net *dev = netdev_priv(net);
3665
3666         unlink_urbs(dev, &dev->txq);
3667         tasklet_schedule(&dev->bh);
3668 }
3669
/* net_device callbacks wired up in lan78xx_probe() */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3684
3685 static void lan78xx_stat_monitor(struct timer_list *t)
3686 {
3687         struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3688
3689         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3690 }
3691
3692 static int lan78xx_probe(struct usb_interface *intf,
3693                          const struct usb_device_id *id)
3694 {
3695         struct lan78xx_net *dev;
3696         struct net_device *netdev;
3697         struct usb_device *udev;
3698         int ret;
3699         unsigned maxp;
3700         unsigned period;
3701         u8 *buf = NULL;
3702
3703         udev = interface_to_usbdev(intf);
3704         udev = usb_get_dev(udev);
3705
3706         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3707         if (!netdev) {
3708                 dev_err(&intf->dev, "Error: OOM\n");
3709                 ret = -ENOMEM;
3710                 goto out1;
3711         }
3712
3713         /* netdev_printk() needs this */
3714         SET_NETDEV_DEV(netdev, &intf->dev);
3715
3716         dev = netdev_priv(netdev);
3717         dev->udev = udev;
3718         dev->intf = intf;
3719         dev->net = netdev;
3720         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3721                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3722
3723         skb_queue_head_init(&dev->rxq);
3724         skb_queue_head_init(&dev->txq);
3725         skb_queue_head_init(&dev->done);
3726         skb_queue_head_init(&dev->rxq_pause);
3727         skb_queue_head_init(&dev->txq_pend);
3728         mutex_init(&dev->phy_mutex);
3729
3730         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3731         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3732         init_usb_anchor(&dev->deferred);
3733
3734         netdev->netdev_ops = &lan78xx_netdev_ops;
3735         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3736         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3737
3738         dev->delta = 1;
3739         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3740
3741         mutex_init(&dev->stats.access_lock);
3742
3743         ret = lan78xx_bind(dev, intf);
3744         if (ret < 0)
3745                 goto out2;
3746
3747         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3748                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3749
3750         /* MTU range: 68 - 9000 */
3751         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3752
3753         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3754         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3755         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3756
3757         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3758         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3759
3760         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3761                                         dev->ep_intr->desc.bEndpointAddress &
3762                                         USB_ENDPOINT_NUMBER_MASK);
3763         period = dev->ep_intr->desc.bInterval;
3764
3765         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3766         buf = kmalloc(maxp, GFP_KERNEL);
3767         if (buf) {
3768                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3769                 if (!dev->urb_intr) {
3770                         ret = -ENOMEM;
3771                         kfree(buf);
3772                         goto out3;
3773                 } else {
3774                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3775                                          dev->pipe_intr, buf, maxp,
3776                                          intr_complete, dev, period);
3777                 }
3778         }
3779
3780         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3781
3782         /* driver requires remote-wakeup capability during autosuspend. */
3783         intf->needs_remote_wakeup = 1;
3784
3785         ret = register_netdev(netdev);
3786         if (ret != 0) {
3787                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3788                 goto out3;
3789         }
3790
3791         usb_set_intfdata(intf, dev);
3792
3793         ret = device_set_wakeup_enable(&udev->dev, true);
3794
3795          /* Default delay of 2sec has more overhead than advantage.
3796           * Set to 10sec as default.
3797           */
3798         pm_runtime_set_autosuspend_delay(&udev->dev,
3799                                          DEFAULT_AUTOSUSPEND_DELAY);
3800
3801         ret = lan78xx_phy_init(dev);
3802         if (ret < 0)
3803                 goto out4;
3804
3805         return 0;
3806
3807 out4:
3808         unregister_netdev(netdev);
3809 out3:
3810         lan78xx_unbind(dev, intf);
3811 out2:
3812         free_netdev(netdev);
3813 out1:
3814         usb_put_dev(udev);
3815
3816         return ret;
3817 }
3818
3819 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3820 {
3821         const u16 crc16poly = 0x8005;
3822         int i;
3823         u16 bit, crc, msb;
3824         u8 data;
3825
3826         crc = 0xFFFF;
3827         for (i = 0; i < len; i++) {
3828                 data = *buf++;
3829                 for (bit = 0; bit < 8; bit++) {
3830                         msb = crc >> 15;
3831                         crc <<= 1;
3832
3833                         if (msb ^ (u16)(data & 1)) {
3834                                 crc ^= crc16poly;
3835                                 crc |= (u16)0x0001U;
3836                         }
3837                         data >>= 1;
3838                 }
3839         }
3840
3841         return crc;
3842 }
3843
/* Program the chip's wake-on-LAN machinery for a full (system) suspend.
 *
 * Disables the MAC, clears all wake status/filters, then builds up WUCSR
 * and PMT_CTL according to the requested @wol bits (WAKE_PHY, WAKE_MAGIC,
 * WAKE_BCAST, WAKE_MCAST, WAKE_UCAST, WAKE_ARP), installs the needed
 * wakeup-frame CRC filters, and finally re-enables the receiver so wake
 * frames can be seen.  Always returns 0.
 *
 * NOTE(review): every lan78xx_read_reg()/lan78xx_write_reg() result is
 * assigned to ret and then ignored — register I/O failures are silently
 * dropped; confirm whether that is acceptable on this path.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* Ethernet address prefixes used to match multicast wake frames */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC in both directions before reprogramming */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake controls and any latched wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable every wakeup-frame filter before installing new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* magic packet uses the deepest suspend mode */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first three bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first two bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: match bytes 12-13 (EtherType field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so the chip can see wake frames while suspended */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3986
/* USB suspend callback (both autosuspend and system suspend).
 *
 * Refuses autosuspend (-EBUSY) while TX work is queued or pending.  On
 * the first suspend it stops the MAC, detaches the netdev, kills all
 * URBs, then re-attaches.  Depending on the suspend type it then arms
 * either good-frame wakeup (autosuspend) or the user's WoL settings via
 * lan78xx_set_suspend() (system suspend).
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	/* only the outermost suspend does the heavy lifting */
	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* makes the TX path defer URBs to dev->deferred */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no stats polling while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake controls and latched wake status */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* write-1-to-clear any pending wake status */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* RX stays on so good frames can wake the chip */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: honour the configured WoL bits */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4082
/* USB resume callback: restart the stats timer, resubmit the interrupt
 * URB and any TX URBs deferred while asleep, clear/re-arm the chip's
 * wake logic, and re-enable the transmitter.
 *
 * NOTE(review): the usb_submit_urb() result for the interrupt URB and
 * all lan78xx_write_reg() results are ignored and the function always
 * returns 0 — confirm whether errors here should propagate.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart statistics polling at the fastest interval */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	/* only the outermost resume restarts the data path */
	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* flush TX URBs deferred by lan78xx_tx_bh() while asleep */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop it; release the autopm ref taken
				 * when the URB was first queued
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake controls and any latched wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-1-to-clear the per-protocol wake "received" flags */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4149
4150 static int lan78xx_reset_resume(struct usb_interface *intf)
4151 {
4152         struct lan78xx_net *dev = usb_get_intfdata(intf);
4153
4154         lan78xx_reset(dev);
4155
4156         phy_start(dev->net->phydev);
4157
4158         return lan78xx_resume(intf);
4159 }
4160
/* USB IDs this driver binds to; exported for module autoloading */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4177
/* USB driver glue; autosuspend is supported (see lan78xx_suspend) */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4189
/* standard module registration boilerplate */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");