]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/net/usb/lan78xx.c
a823f010de301f9519a4505b9c3094460543fcc3
[linux.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy.h>
40 #include <linux/of_net.h>
41 #include "lan78xx.h"
42
43 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
44 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
45 #define DRIVER_NAME     "lan78xx"
46 #define DRIVER_VERSION  "1.0.6"
47
48 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
49 #define THROTTLE_JIFFIES                (HZ / 8)
50 #define UNLINK_TIMEOUT_MS               3
51
52 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
53
54 #define SS_USB_PKT_SIZE                 (1024)
55 #define HS_USB_PKT_SIZE                 (512)
56 #define FS_USB_PKT_SIZE                 (64)
57
58 #define MAX_RX_FIFO_SIZE                (12 * 1024)
59 #define MAX_TX_FIFO_SIZE                (12 * 1024)
60 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
61 #define DEFAULT_BULK_IN_DELAY           (0x0800)
62 #define MAX_SINGLE_PACKET_SIZE          (9000)
63 #define DEFAULT_TX_CSUM_ENABLE          (true)
64 #define DEFAULT_RX_CSUM_ENABLE          (true)
65 #define DEFAULT_TSO_CSUM_ENABLE         (true)
66 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
67 #define TX_OVERHEAD                     (8)
68 #define RXW_PADDING                     2
69
70 #define LAN78XX_USB_VENDOR_ID           (0x0424)
71 #define LAN7800_USB_PRODUCT_ID          (0x7800)
72 #define LAN7850_USB_PRODUCT_ID          (0x7850)
73 #define LAN7801_USB_PRODUCT_ID          (0x7801)
74 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
75 #define LAN78XX_OTP_MAGIC               (0x78F3)
76
77 #define MII_READ                        1
78 #define MII_WRITE                       0
79
80 #define EEPROM_INDICATOR                (0xA5)
81 #define EEPROM_MAC_OFFSET               (0x01)
82 #define MAX_EEPROM_SIZE                 512
83 #define OTP_INDICATOR_1                 (0xF3)
84 #define OTP_INDICATOR_2                 (0xF7)
85
86 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
87                                          WAKE_MCAST | WAKE_BCAST | \
88                                          WAKE_ARP | WAKE_MAGIC)
89
90 /* USB related defines */
91 #define BULK_IN_PIPE                    1
92 #define BULK_OUT_PIPE                   2
93
94 /* default autosuspend delay (mSec)*/
95 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
96
97 /* statistic update interval (mSec) */
98 #define STAT_UPDATE_TIMER               (1 * 1000)
99
100 /* defines interrupts from interrupt EP */
101 #define MAX_INT_EP                      (32)
102 #define INT_EP_INTEP                    (31)
103 #define INT_EP_OTP_WR_DONE              (28)
104 #define INT_EP_EEE_TX_LPI_START         (26)
105 #define INT_EP_EEE_TX_LPI_STOP          (25)
106 #define INT_EP_EEE_RX_LPI               (24)
107 #define INT_EP_MAC_RESET_TIMEOUT        (23)
108 #define INT_EP_RDFO                     (22)
109 #define INT_EP_TXE                      (21)
110 #define INT_EP_USB_STATUS               (20)
111 #define INT_EP_TX_DIS                   (19)
112 #define INT_EP_RX_DIS                   (18)
113 #define INT_EP_PHY                      (17)
114 #define INT_EP_DP                       (16)
115 #define INT_EP_MAC_ERR                  (15)
116 #define INT_EP_TDFU                     (14)
117 #define INT_EP_TDFO                     (13)
118 #define INT_EP_UTX                      (12)
119 #define INT_EP_GPIO_11                  (11)
120 #define INT_EP_GPIO_10                  (10)
121 #define INT_EP_GPIO_9                   (9)
122 #define INT_EP_GPIO_8                   (8)
123 #define INT_EP_GPIO_7                   (7)
124 #define INT_EP_GPIO_6                   (6)
125 #define INT_EP_GPIO_5                   (5)
126 #define INT_EP_GPIO_4                   (4)
127 #define INT_EP_GPIO_3                   (3)
128 #define INT_EP_GPIO_2                   (2)
129 #define INT_EP_GPIO_1                   (1)
130 #define INT_EP_GPIO_0                   (0)
131
132 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
133         "RX FCS Errors",
134         "RX Alignment Errors",
135         "Rx Fragment Errors",
136         "RX Jabber Errors",
137         "RX Undersize Frame Errors",
138         "RX Oversize Frame Errors",
139         "RX Dropped Frames",
140         "RX Unicast Byte Count",
141         "RX Broadcast Byte Count",
142         "RX Multicast Byte Count",
143         "RX Unicast Frames",
144         "RX Broadcast Frames",
145         "RX Multicast Frames",
146         "RX Pause Frames",
147         "RX 64 Byte Frames",
148         "RX 65 - 127 Byte Frames",
149         "RX 128 - 255 Byte Frames",
150         "RX 256 - 511 Bytes Frames",
151         "RX 512 - 1023 Byte Frames",
152         "RX 1024 - 1518 Byte Frames",
153         "RX Greater 1518 Byte Frames",
154         "EEE RX LPI Transitions",
155         "EEE RX LPI Time",
156         "TX FCS Errors",
157         "TX Excess Deferral Errors",
158         "TX Carrier Errors",
159         "TX Bad Byte Count",
160         "TX Single Collisions",
161         "TX Multiple Collisions",
162         "TX Excessive Collision",
163         "TX Late Collisions",
164         "TX Unicast Byte Count",
165         "TX Broadcast Byte Count",
166         "TX Multicast Byte Count",
167         "TX Unicast Frames",
168         "TX Broadcast Frames",
169         "TX Multicast Frames",
170         "TX Pause Frames",
171         "TX 64 Byte Frames",
172         "TX 65 - 127 Byte Frames",
173         "TX 128 - 255 Byte Frames",
174         "TX 256 - 511 Bytes Frames",
175         "TX 512 - 1023 Byte Frames",
176         "TX 1024 - 1518 Byte Frames",
177         "TX Greater 1518 Byte Frames",
178         "EEE TX LPI Transitions",
179         "EEE TX LPI Time",
180 };
181
182 struct lan78xx_statstage {
183         u32 rx_fcs_errors;
184         u32 rx_alignment_errors;
185         u32 rx_fragment_errors;
186         u32 rx_jabber_errors;
187         u32 rx_undersize_frame_errors;
188         u32 rx_oversize_frame_errors;
189         u32 rx_dropped_frames;
190         u32 rx_unicast_byte_count;
191         u32 rx_broadcast_byte_count;
192         u32 rx_multicast_byte_count;
193         u32 rx_unicast_frames;
194         u32 rx_broadcast_frames;
195         u32 rx_multicast_frames;
196         u32 rx_pause_frames;
197         u32 rx_64_byte_frames;
198         u32 rx_65_127_byte_frames;
199         u32 rx_128_255_byte_frames;
200         u32 rx_256_511_bytes_frames;
201         u32 rx_512_1023_byte_frames;
202         u32 rx_1024_1518_byte_frames;
203         u32 rx_greater_1518_byte_frames;
204         u32 eee_rx_lpi_transitions;
205         u32 eee_rx_lpi_time;
206         u32 tx_fcs_errors;
207         u32 tx_excess_deferral_errors;
208         u32 tx_carrier_errors;
209         u32 tx_bad_byte_count;
210         u32 tx_single_collisions;
211         u32 tx_multiple_collisions;
212         u32 tx_excessive_collision;
213         u32 tx_late_collisions;
214         u32 tx_unicast_byte_count;
215         u32 tx_broadcast_byte_count;
216         u32 tx_multicast_byte_count;
217         u32 tx_unicast_frames;
218         u32 tx_broadcast_frames;
219         u32 tx_multicast_frames;
220         u32 tx_pause_frames;
221         u32 tx_64_byte_frames;
222         u32 tx_65_127_byte_frames;
223         u32 tx_128_255_byte_frames;
224         u32 tx_256_511_bytes_frames;
225         u32 tx_512_1023_byte_frames;
226         u32 tx_1024_1518_byte_frames;
227         u32 tx_greater_1518_byte_frames;
228         u32 eee_tx_lpi_transitions;
229         u32 eee_tx_lpi_time;
230 };
231
232 struct lan78xx_statstage64 {
233         u64 rx_fcs_errors;
234         u64 rx_alignment_errors;
235         u64 rx_fragment_errors;
236         u64 rx_jabber_errors;
237         u64 rx_undersize_frame_errors;
238         u64 rx_oversize_frame_errors;
239         u64 rx_dropped_frames;
240         u64 rx_unicast_byte_count;
241         u64 rx_broadcast_byte_count;
242         u64 rx_multicast_byte_count;
243         u64 rx_unicast_frames;
244         u64 rx_broadcast_frames;
245         u64 rx_multicast_frames;
246         u64 rx_pause_frames;
247         u64 rx_64_byte_frames;
248         u64 rx_65_127_byte_frames;
249         u64 rx_128_255_byte_frames;
250         u64 rx_256_511_bytes_frames;
251         u64 rx_512_1023_byte_frames;
252         u64 rx_1024_1518_byte_frames;
253         u64 rx_greater_1518_byte_frames;
254         u64 eee_rx_lpi_transitions;
255         u64 eee_rx_lpi_time;
256         u64 tx_fcs_errors;
257         u64 tx_excess_deferral_errors;
258         u64 tx_carrier_errors;
259         u64 tx_bad_byte_count;
260         u64 tx_single_collisions;
261         u64 tx_multiple_collisions;
262         u64 tx_excessive_collision;
263         u64 tx_late_collisions;
264         u64 tx_unicast_byte_count;
265         u64 tx_broadcast_byte_count;
266         u64 tx_multicast_byte_count;
267         u64 tx_unicast_frames;
268         u64 tx_broadcast_frames;
269         u64 tx_multicast_frames;
270         u64 tx_pause_frames;
271         u64 tx_64_byte_frames;
272         u64 tx_65_127_byte_frames;
273         u64 tx_128_255_byte_frames;
274         u64 tx_256_511_bytes_frames;
275         u64 tx_512_1023_byte_frames;
276         u64 tx_1024_1518_byte_frames;
277         u64 tx_greater_1518_byte_frames;
278         u64 eee_tx_lpi_transitions;
279         u64 eee_tx_lpi_time;
280 };
281
282 struct lan78xx_net;
283
284 struct lan78xx_priv {
285         struct lan78xx_net *dev;
286         u32 rfe_ctl;
287         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
288         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
289         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
290         struct mutex dataport_mutex; /* for dataport access */
291         spinlock_t rfe_ctl_lock; /* for rfe register access */
292         struct work_struct set_multicast;
293         struct work_struct set_vlan;
294         u32 wol;
295 };
296
/* Lifecycle state of an skb while it travels through the URB queues;
 * stored in the skb's control block (see struct skb_data).
 */
enum skb_state {
	illegal = 0,	/* zero so a cleared cb is never a valid state */
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
306
307 struct skb_data {               /* skb->cb is one of these */
308         struct urb *urb;
309         struct lan78xx_net *dev;
310         enum skb_state state;
311         size_t length;
312         int num_of_packet;
313 };
314
315 struct usb_context {
316         struct usb_ctrlrequest req;
317         struct lan78xx_net *dev;
318 };
319
320 #define EVENT_TX_HALT                   0
321 #define EVENT_RX_HALT                   1
322 #define EVENT_RX_MEMORY                 2
323 #define EVENT_STS_SPLIT                 3
324 #define EVENT_LINK_RESET                4
325 #define EVENT_RX_PAUSED                 5
326 #define EVENT_DEV_WAKING                6
327 #define EVENT_DEV_ASLEEP                7
328 #define EVENT_DEV_OPEN                  8
329 #define EVENT_STAT_UPDATE               9
330
331 struct statstage {
332         struct mutex                    access_lock;    /* for stats access */
333         struct lan78xx_statstage        saved;
334         struct lan78xx_statstage        rollover_count;
335         struct lan78xx_statstage        rollover_max;
336         struct lan78xx_statstage64      curr_stat;
337 };
338
339 struct irq_domain_data {
340         struct irq_domain       *irqdomain;
341         unsigned int            phyirq;
342         struct irq_chip         *irqchip;
343         irq_flow_handler_t      irq_handler;
344         u32                     irqenable;
345         struct mutex            irq_lock;               /* for irq bus access */
346 };
347
348 struct lan78xx_net {
349         struct net_device       *net;
350         struct usb_device       *udev;
351         struct usb_interface    *intf;
352         void                    *driver_priv;
353
354         int                     rx_qlen;
355         int                     tx_qlen;
356         struct sk_buff_head     rxq;
357         struct sk_buff_head     txq;
358         struct sk_buff_head     done;
359         struct sk_buff_head     rxq_pause;
360         struct sk_buff_head     txq_pend;
361
362         struct tasklet_struct   bh;
363         struct delayed_work     wq;
364
365         struct usb_host_endpoint *ep_blkin;
366         struct usb_host_endpoint *ep_blkout;
367         struct usb_host_endpoint *ep_intr;
368
369         int                     msg_enable;
370
371         struct urb              *urb_intr;
372         struct usb_anchor       deferred;
373
374         struct mutex            phy_mutex; /* for phy access */
375         unsigned                pipe_in, pipe_out, pipe_intr;
376
377         u32                     hard_mtu;       /* count any extra framing */
378         size_t                  rx_urb_size;    /* size for rx urbs */
379
380         unsigned long           flags;
381
382         wait_queue_head_t       *wait;
383         unsigned char           suspend_count;
384
385         unsigned                maxpacket;
386         struct timer_list       delay;
387         struct timer_list       stat_monitor;
388
389         unsigned long           data[5];
390
391         int                     link_on;
392         u8                      mdix_ctrl;
393
394         u32                     chipid;
395         u32                     chiprev;
396         struct mii_bus          *mdiobus;
397         phy_interface_t         interface;
398
399         int                     fc_autoneg;
400         u8                      fc_request_control;
401
402         int                     delta;
403         struct statstage        stats;
404
405         struct irq_domain_data  domain_data;
406 };
407
408 /* define external phy id */
409 #define PHY_LAN8835                     (0x0007C130)
410 #define PHY_KSZ9031RNX                  (0x00221620)
411
412 /* use ethtool to change the level for any given device */
413 static int msg_level = -1;
414 module_param(msg_level, int, 0);
415 MODULE_PARM_DESC(msg_level, "Override default message level");
416
417 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
418 {
419         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
420         int ret;
421
422         if (!buf)
423                 return -ENOMEM;
424
425         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
426                               USB_VENDOR_REQUEST_READ_REGISTER,
427                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
428                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
429         if (likely(ret >= 0)) {
430                 le32_to_cpus(buf);
431                 *data = *buf;
432         } else {
433                 netdev_warn(dev->net,
434                             "Failed to read register index 0x%08x. ret = %d",
435                             index, ret);
436         }
437
438         kfree(buf);
439
440         return ret;
441 }
442
443 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
444 {
445         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
446         int ret;
447
448         if (!buf)
449                 return -ENOMEM;
450
451         *buf = data;
452         cpu_to_le32s(buf);
453
454         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
455                               USB_VENDOR_REQUEST_WRITE_REGISTER,
456                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
457                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
458         if (unlikely(ret < 0)) {
459                 netdev_warn(dev->net,
460                             "Failed to write register index 0x%08x. ret = %d",
461                             index, ret);
462         }
463
464         kfree(buf);
465
466         return ret;
467 }
468
469 static int lan78xx_read_stats(struct lan78xx_net *dev,
470                               struct lan78xx_statstage *data)
471 {
472         int ret = 0;
473         int i;
474         struct lan78xx_statstage *stats;
475         u32 *src;
476         u32 *dst;
477
478         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
479         if (!stats)
480                 return -ENOMEM;
481
482         ret = usb_control_msg(dev->udev,
483                               usb_rcvctrlpipe(dev->udev, 0),
484                               USB_VENDOR_REQUEST_GET_STATS,
485                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
486                               0,
487                               0,
488                               (void *)stats,
489                               sizeof(*stats),
490                               USB_CTRL_SET_TIMEOUT);
491         if (likely(ret >= 0)) {
492                 src = (u32 *)stats;
493                 dst = (u32 *)data;
494                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
495                         le32_to_cpus(&src[i]);
496                         dst[i] = src[i];
497                 }
498         } else {
499                 netdev_warn(dev->net,
500                             "Failed to read stat ret = 0x%x", ret);
501         }
502
503         kfree(stats);
504
505         return ret;
506 }
507
/* Detect a 32-bit hardware counter wrap: if the freshly read value is
 * smaller than the previously saved snapshot, the counter rolled over,
 * so bump its per-counter rollover count.
 *
 * Wrapped in do { } while (0) so the expansion is a single statement:
 * the previous bare "{ ... }" form plus the caller's ';' breaks inside
 * an unbraced if/else (CERT PRE10-C).  Arguments are parenthesized;
 * each is still evaluated once per mention.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
512
513 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
514                                         struct lan78xx_statstage *stats)
515 {
516         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
517         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
518         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
519         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
520         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
521         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
522         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
523         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
524         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
525         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
526         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
527         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
528         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
529         check_counter_rollover(stats, dev->stats, rx_pause_frames);
530         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
531         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
532         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
533         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
534         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
535         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
536         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
537         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
538         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
539         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
540         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
541         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
542         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
543         check_counter_rollover(stats, dev->stats, tx_single_collisions);
544         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
545         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
546         check_counter_rollover(stats, dev->stats, tx_late_collisions);
547         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
548         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
549         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
550         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
551         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
552         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
553         check_counter_rollover(stats, dev->stats, tx_pause_frames);
554         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
555         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
556         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
557         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
558         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
559         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
560         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
561         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
562         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
563
564         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
565 }
566
567 static void lan78xx_update_stats(struct lan78xx_net *dev)
568 {
569         u32 *p, *count, *max;
570         u64 *data;
571         int i;
572         struct lan78xx_statstage lan78xx_stats;
573
574         if (usb_autopm_get_interface(dev->intf) < 0)
575                 return;
576
577         p = (u32 *)&lan78xx_stats;
578         count = (u32 *)&dev->stats.rollover_count;
579         max = (u32 *)&dev->stats.rollover_max;
580         data = (u64 *)&dev->stats.curr_stat;
581
582         mutex_lock(&dev->stats.access_lock);
583
584         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
585                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
586
587         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
588                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
589
590         mutex_unlock(&dev->stats.access_lock);
591
592         usb_autopm_put_interface(dev->intf);
593 }
594
595 /* Loop until the read is completed with timeout called with phy_mutex held */
596 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
597 {
598         unsigned long start_time = jiffies;
599         u32 val;
600         int ret;
601
602         do {
603                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
604                 if (unlikely(ret < 0))
605                         return -EIO;
606
607                 if (!(val & MII_ACC_MII_BUSY_))
608                         return 0;
609         } while (!time_after(jiffies, start_time + HZ));
610
611         return -EIO;
612 }
613
614 static inline u32 mii_access(int id, int index, int read)
615 {
616         u32 ret;
617
618         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
619         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
620         if (read)
621                 ret |= MII_ACC_MII_READ_;
622         else
623                 ret |= MII_ACC_MII_WRITE_;
624         ret |= MII_ACC_MII_BUSY_;
625
626         return ret;
627 }
628
629 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
630 {
631         unsigned long start_time = jiffies;
632         u32 val;
633         int ret;
634
635         do {
636                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
637                 if (unlikely(ret < 0))
638                         return -EIO;
639
640                 if (!(val & E2P_CMD_EPC_BUSY_) ||
641                     (val & E2P_CMD_EPC_TIMEOUT_))
642                         break;
643                 usleep_range(40, 100);
644         } while (!time_after(jiffies, start_time + HZ));
645
646         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
647                 netdev_warn(dev->net, "EEPROM read operation timeout");
648                 return -EIO;
649         }
650
651         return 0;
652 }
653
654 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
655 {
656         unsigned long start_time = jiffies;
657         u32 val;
658         int ret;
659
660         do {
661                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
662                 if (unlikely(ret < 0))
663                         return -EIO;
664
665                 if (!(val & E2P_CMD_EPC_BUSY_))
666                         return 0;
667
668                 usleep_range(40, 100);
669         } while (!time_after(jiffies, start_time + HZ));
670
671         netdev_warn(dev->net, "EEPROM is busy");
672         return -EIO;
673 }
674
675 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
676                                    u32 length, u8 *data)
677 {
678         u32 val;
679         u32 saved;
680         int i, ret;
681         int retval;
682
683         /* depends on chip, some EEPROM pins are muxed with LED function.
684          * disable & restore LED function to access EEPROM.
685          */
686         ret = lan78xx_read_reg(dev, HW_CFG, &val);
687         saved = val;
688         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
689                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
690                 ret = lan78xx_write_reg(dev, HW_CFG, val);
691         }
692
693         retval = lan78xx_eeprom_confirm_not_busy(dev);
694         if (retval)
695                 return retval;
696
697         for (i = 0; i < length; i++) {
698                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
699                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
700                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
701                 if (unlikely(ret < 0)) {
702                         retval = -EIO;
703                         goto exit;
704                 }
705
706                 retval = lan78xx_wait_eeprom(dev);
707                 if (retval < 0)
708                         goto exit;
709
710                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
711                 if (unlikely(ret < 0)) {
712                         retval = -EIO;
713                         goto exit;
714                 }
715
716                 data[i] = val & 0xFF;
717                 offset++;
718         }
719
720         retval = 0;
721 exit:
722         if (dev->chipid == ID_REV_CHIP_ID_7800_)
723                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
724
725         return retval;
726 }
727
728 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
729                                u32 length, u8 *data)
730 {
731         u8 sig;
732         int ret;
733
734         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
735         if ((ret == 0) && (sig == EEPROM_INDICATOR))
736                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
737         else
738                 ret = -EINVAL;
739
740         return ret;
741 }
742
743 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
744                                     u32 length, u8 *data)
745 {
746         u32 val;
747         u32 saved;
748         int i, ret;
749         int retval;
750
751         /* depends on chip, some EEPROM pins are muxed with LED function.
752          * disable & restore LED function to access EEPROM.
753          */
754         ret = lan78xx_read_reg(dev, HW_CFG, &val);
755         saved = val;
756         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
757                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
758                 ret = lan78xx_write_reg(dev, HW_CFG, val);
759         }
760
761         retval = lan78xx_eeprom_confirm_not_busy(dev);
762         if (retval)
763                 goto exit;
764
765         /* Issue write/erase enable command */
766         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
767         ret = lan78xx_write_reg(dev, E2P_CMD, val);
768         if (unlikely(ret < 0)) {
769                 retval = -EIO;
770                 goto exit;
771         }
772
773         retval = lan78xx_wait_eeprom(dev);
774         if (retval < 0)
775                 goto exit;
776
777         for (i = 0; i < length; i++) {
778                 /* Fill data register */
779                 val = data[i];
780                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
781                 if (ret < 0) {
782                         retval = -EIO;
783                         goto exit;
784                 }
785
786                 /* Send "write" command */
787                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
788                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
789                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
790                 if (ret < 0) {
791                         retval = -EIO;
792                         goto exit;
793                 }
794
795                 retval = lan78xx_wait_eeprom(dev);
796                 if (retval < 0)
797                         goto exit;
798
799                 offset++;
800         }
801
802         retval = 0;
803 exit:
804         if (dev->chipid == ID_REV_CHIP_ID_7800_)
805                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
806
807         return retval;
808 }
809
/* lan78xx_read_raw_otp - read @length bytes from the device OTP array.
 * @dev:    driver context
 * @offset: byte offset into the OTP space
 * @length: number of bytes to read
 * @data:   destination buffer (at least @length bytes)
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set), it is woken
 * first and the function polls until the power-down bit clears.  Each byte
 * is then fetched by programming the split address registers and issuing a
 * READ command, polling OTP_STATUS until the controller is idle.
 *
 * Returns 0 on success or -EIO if either polling loop times out (1 s).
 *
 * NOTE(review): the per-access return codes collected in @ret are never
 * checked; only the timeout paths report failure.  Confirm that register
 * access failures are acceptable to ignore here.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split: high bits in ADDR1, low in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* latch a READ command and kick it off */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		/* wait for the command to complete */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		/* only the low byte of OTP_RD_DATA is meaningful here */
		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
863
/* lan78xx_write_raw_otp - program @length bytes into the device OTP array.
 * @dev:    driver context
 * @offset: byte offset into the OTP space
 * @length: number of bytes to program
 * @data:   source buffer (at least @length bytes)
 *
 * Wakes the OTP block if powered down, switches it into BYTE program mode,
 * then programs one byte per iteration and polls OTP_STATUS until the
 * controller is idle.  OTP cells are one-time programmable; callers are
 * expected to validate the payload before invoking this.
 *
 * Returns 0 on success or -EIO if a polling loop times out (1 s).
 *
 * NOTE(review): as with the read path, the per-access @ret values are not
 * checked.  OTP_TST_CMD_PRGVRFY_ presumably requests program-with-verify —
 * confirm against the LAN78xx datasheet.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* byte address is split: high bits in ADDR1, low in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		/* wait for the program cycle to finish */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
916
917 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
918                             u32 length, u8 *data)
919 {
920         u8 sig;
921         int ret;
922
923         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
924
925         if (ret == 0) {
926                 if (sig == OTP_INDICATOR_1)
927                         offset = offset;
928                 else if (sig == OTP_INDICATOR_2)
929                         offset += 0x100;
930                 else
931                         ret = -EINVAL;
932                 if (!ret)
933                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
934         }
935
936         return ret;
937 }
938
939 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
940 {
941         int i, ret;
942
943         for (i = 0; i < 100; i++) {
944                 u32 dp_sel;
945
946                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
947                 if (unlikely(ret < 0))
948                         return -EIO;
949
950                 if (dp_sel & DP_SEL_DPRDY_)
951                         return 0;
952
953                 usleep_range(40, 100);
954         }
955
956         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
957
958         return -EIO;
959 }
960
/* lan78xx_dataport_write - write @length 32-bit words into internal RAM.
 * @dev:        driver context
 * @ram_select: which internal RAM to target (DP_SEL_RSEL_* value)
 * @addr:       starting word address within that RAM
 * @length:     number of 32-bit words to write
 * @buf:        source words
 *
 * Serialized by pdata->dataport_mutex.  Each word is written by loading
 * DP_ADDR and DP_DATA, issuing DP_CMD_WRITE_, and waiting for the port to
 * go idle again.
 *
 * NOTE(review): when the interface cannot be runtime-resumed this returns
 * 0 (success) without writing anything — callers cannot tell the
 * difference.  Confirm this best-effort behavior is intentional.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
			return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* each write must complete before the next address is set */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1001
1002 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1003                                     int index, u8 addr[ETH_ALEN])
1004 {
1005         u32     temp;
1006
1007         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1008                 temp = addr[3];
1009                 temp = addr[2] | (temp << 8);
1010                 temp = addr[1] | (temp << 8);
1011                 temp = addr[0] | (temp << 8);
1012                 pdata->pfilter_table[index][1] = temp;
1013                 temp = addr[5];
1014                 temp = addr[4] | (temp << 8);
1015                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1016                 pdata->pfilter_table[index][0] = temp;
1017         }
1018 }
1019
1020 /* returns hash bit number for given MAC address */
1021 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1022 {
1023         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1024 }
1025
1026 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1027 {
1028         struct lan78xx_priv *pdata =
1029                         container_of(param, struct lan78xx_priv, set_multicast);
1030         struct lan78xx_net *dev = pdata->dev;
1031         int i;
1032         int ret;
1033
1034         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1035                   pdata->rfe_ctl);
1036
1037         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1038                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1039
1040         for (i = 1; i < NUM_OF_MAF; i++) {
1041                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1042                 ret = lan78xx_write_reg(dev, MAF_LO(i),
1043                                         pdata->pfilter_table[i][1]);
1044                 ret = lan78xx_write_reg(dev, MAF_HI(i),
1045                                         pdata->pfilter_table[i][0]);
1046         }
1047
1048         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1049 }
1050
1051 static void lan78xx_set_multicast(struct net_device *netdev)
1052 {
1053         struct lan78xx_net *dev = netdev_priv(netdev);
1054         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1055         unsigned long flags;
1056         int i;
1057
1058         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1059
1060         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1061                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1062
1063         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1064                         pdata->mchash_table[i] = 0;
1065         /* pfilter_table[0] has own HW address */
1066         for (i = 1; i < NUM_OF_MAF; i++) {
1067                         pdata->pfilter_table[i][0] =
1068                         pdata->pfilter_table[i][1] = 0;
1069         }
1070
1071         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1072
1073         if (dev->net->flags & IFF_PROMISC) {
1074                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1075                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1076         } else {
1077                 if (dev->net->flags & IFF_ALLMULTI) {
1078                         netif_dbg(dev, drv, dev->net,
1079                                   "receive all multicast enabled");
1080                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1081                 }
1082         }
1083
1084         if (netdev_mc_count(dev->net)) {
1085                 struct netdev_hw_addr *ha;
1086                 int i;
1087
1088                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1089
1090                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1091
1092                 i = 1;
1093                 netdev_for_each_mc_addr(ha, netdev) {
1094                         /* set first 32 into Perfect Filter */
1095                         if (i < 33) {
1096                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1097                         } else {
1098                                 u32 bitnum = lan78xx_hash(ha->addr);
1099
1100                                 pdata->mchash_table[bitnum / 32] |=
1101                                                         (1 << (bitnum % 32));
1102                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1103                         }
1104                         i++;
1105                 }
1106         }
1107
1108         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1109
1110         /* defer register writes to a sleepable context */
1111         schedule_work(&pdata->set_multicast);
1112 }
1113
1114 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1115                                       u16 lcladv, u16 rmtadv)
1116 {
1117         u32 flow = 0, fct_flow = 0;
1118         int ret;
1119         u8 cap;
1120
1121         if (dev->fc_autoneg)
1122                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1123         else
1124                 cap = dev->fc_request_control;
1125
1126         if (cap & FLOW_CTRL_TX)
1127                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1128
1129         if (cap & FLOW_CTRL_RX)
1130                 flow |= FLOW_CR_RX_FCEN_;
1131
1132         if (dev->udev->speed == USB_SPEED_SUPER)
1133                 fct_flow = 0x817;
1134         else if (dev->udev->speed == USB_SPEED_HIGH)
1135                 fct_flow = 0x211;
1136
1137         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1138                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1139                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1140
1141         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1142
1143         /* threshold value should be set before enabling flow */
1144         ret = lan78xx_write_reg(dev, FLOW, flow);
1145
1146         return 0;
1147 }
1148
/* lan78xx_link_reset - react to a PHY link state change.
 *
 * Called from the deferred-work path after a PHY interrupt.  Clears the
 * chip's PHY interrupt status, re-reads the PHY state, and then:
 *  - on link down: resets the MAC and stops the statistics timer;
 *  - on link up: tunes USB3 U1/U2 link power states to the negotiated
 *    Ethernet speed, reprograms flow control from the autoneg result,
 *    and (re)arms the statistics timer.
 *
 * Returns 0 or a negative errno.
 *
 * NOTE(review): several intermediate register-write return codes are
 * ignored, and when neither link transition applies the function returns
 * the status of the initial INT_STS write.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no point collecting stats while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* tune USB3 U1/U2 low-power states to the wire speed */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		/* fetch both advertisement words for flow-control resolution */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
1224
1225 /* some work can't be done in tasklets, so we use keventd
1226  *
1227  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1228  * but tasklet_schedule() doesn't.      hope the failure is rare.
1229  */
1230 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1231 {
1232         set_bit(work, &dev->flags);
1233         if (!schedule_delayed_work(&dev->wq, 0))
1234                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1235 }
1236
1237 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1238 {
1239         u32 intdata;
1240
1241         if (urb->actual_length != 4) {
1242                 netdev_warn(dev->net,
1243                             "unexpected urb length %d", urb->actual_length);
1244                 return;
1245         }
1246
1247         memcpy(&intdata, urb->transfer_buffer, 4);
1248         le32_to_cpus(&intdata);
1249
1250         if (intdata & INT_ENP_PHY_INT) {
1251                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1252                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1253
1254                 if (dev->domain_data.phyirq > 0)
1255                         generic_handle_irq(dev->domain_data.phyirq);
1256         } else
1257                 netdev_warn(dev->net,
1258                             "unexpected interrupt: 0x%08x\n", intdata);
1259 }
1260
/* ethtool get_eeprom_len: the device exposes a fixed-size EEPROM space. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1265
1266 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1267                                       struct ethtool_eeprom *ee, u8 *data)
1268 {
1269         struct lan78xx_net *dev = netdev_priv(netdev);
1270         int ret;
1271
1272         ret = usb_autopm_get_interface(dev->intf);
1273         if (ret)
1274                 return ret;
1275
1276         ee->magic = LAN78XX_EEPROM_MAGIC;
1277
1278         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1279
1280         usb_autopm_put_interface(dev->intf);
1281
1282         return ret;
1283 }
1284
1285 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1286                                       struct ethtool_eeprom *ee, u8 *data)
1287 {
1288         struct lan78xx_net *dev = netdev_priv(netdev);
1289         int ret;
1290
1291         ret = usb_autopm_get_interface(dev->intf);
1292         if (ret)
1293                 return ret;
1294
1295         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1296          * to load data from EEPROM
1297          */
1298         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1299                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1300         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1301                  (ee->offset == 0) &&
1302                  (ee->len == 512) &&
1303                  (data[0] == OTP_INDICATOR_1))
1304                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1305
1306         usb_autopm_put_interface(dev->intf);
1307
1308         return ret;
1309 }
1310
1311 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1312                                 u8 *data)
1313 {
1314         if (stringset == ETH_SS_STATS)
1315                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1316 }
1317
1318 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1319 {
1320         if (sset == ETH_SS_STATS)
1321                 return ARRAY_SIZE(lan78xx_gstrings);
1322         else
1323                 return -EOPNOTSUPP;
1324 }
1325
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy the
 * cached snapshot into @data.  access_lock keeps the copy coherent with
 * concurrent updates of curr_stat.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1337
1338 static void lan78xx_get_wol(struct net_device *netdev,
1339                             struct ethtool_wolinfo *wol)
1340 {
1341         struct lan78xx_net *dev = netdev_priv(netdev);
1342         int ret;
1343         u32 buf;
1344         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1345
1346         if (usb_autopm_get_interface(dev->intf) < 0)
1347                         return;
1348
1349         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1350         if (unlikely(ret < 0)) {
1351                 wol->supported = 0;
1352                 wol->wolopts = 0;
1353         } else {
1354                 if (buf & USB_CFG_RMT_WKP_) {
1355                         wol->supported = WAKE_ALL;
1356                         wol->wolopts = pdata->wol;
1357                 } else {
1358                         wol->supported = 0;
1359                         wol->wolopts = 0;
1360                 }
1361         }
1362
1363         usb_autopm_put_interface(dev->intf);
1364 }
1365
1366 static int lan78xx_set_wol(struct net_device *netdev,
1367                            struct ethtool_wolinfo *wol)
1368 {
1369         struct lan78xx_net *dev = netdev_priv(netdev);
1370         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1371         int ret;
1372
1373         ret = usb_autopm_get_interface(dev->intf);
1374         if (ret < 0)
1375                 return ret;
1376
1377         pdata->wol = 0;
1378         if (wol->wolopts & WAKE_UCAST)
1379                 pdata->wol |= WAKE_UCAST;
1380         if (wol->wolopts & WAKE_MCAST)
1381                 pdata->wol |= WAKE_MCAST;
1382         if (wol->wolopts & WAKE_BCAST)
1383                 pdata->wol |= WAKE_BCAST;
1384         if (wol->wolopts & WAKE_MAGIC)
1385                 pdata->wol |= WAKE_MAGIC;
1386         if (wol->wolopts & WAKE_PHY)
1387                 pdata->wol |= WAKE_PHY;
1388         if (wol->wolopts & WAKE_ARP)
1389                 pdata->wol |= WAKE_ARP;
1390
1391         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1392
1393         phy_ethtool_set_wol(netdev->phydev, wol);
1394
1395         usb_autopm_put_interface(dev->intf);
1396
1397         return ret;
1398 }
1399
/* ethtool get_eee: combine the PHY's EEE report with the MAC's EEE enable
 * state.  EEE is considered enabled only when MAC_CR_EEE_EN_ is set; in
 * that case the LPI request delay register supplies tx_lpi_timer.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* let the PHY fill in advertised/lp_advertised first */
	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only if both ends advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1437
/* ethtool set_eee: enable or disable Energy-Efficient Ethernet in the MAC
 * and, when enabling, propagate the request to the PHY and program the
 * LPI request delay from tx_lpi_timer.
 *
 * NOTE(review): always returns 0; intermediate register return codes in
 * @ret are not checked.  Confirm whether failures should propagate.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		/* set MAC_CR_EEE_EN_ via read-modify-write */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same unit */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		/* clear MAC_CR_EEE_EN_ */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1467
1468 static u32 lan78xx_get_link(struct net_device *net)
1469 {
1470         phy_read_status(net->phydev);
1471
1472         return net->phydev->link;
1473 }
1474
1475 static void lan78xx_get_drvinfo(struct net_device *net,
1476                                 struct ethtool_drvinfo *info)
1477 {
1478         struct lan78xx_net *dev = netdev_priv(net);
1479
1480         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1481         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1482         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1483 }
1484
/* ethtool get_msglevel: return the netif message-level bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1491
/* ethtool set_msglevel: store the netif message-level bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1498
1499 static int lan78xx_get_link_ksettings(struct net_device *net,
1500                                       struct ethtool_link_ksettings *cmd)
1501 {
1502         struct lan78xx_net *dev = netdev_priv(net);
1503         struct phy_device *phydev = net->phydev;
1504         int ret;
1505
1506         ret = usb_autopm_get_interface(dev->intf);
1507         if (ret < 0)
1508                 return ret;
1509
1510         phy_ethtool_ksettings_get(phydev, cmd);
1511
1512         usb_autopm_put_interface(dev->intf);
1513
1514         return ret;
1515 }
1516
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via phylib.
 *
 * When autoneg is off, the BMCR loopback bit is pulsed for ~1 ms to force
 * the link down so the partner renegotiates against the new forced mode.
 * NOTE(review): that loopback pulse is a PHY-specific trick — confirm it
 * is required for the PHYs this driver pairs with.
 *
 * Returns 0 or a negative errno.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1544
1545 static void lan78xx_get_pause(struct net_device *net,
1546                               struct ethtool_pauseparam *pause)
1547 {
1548         struct lan78xx_net *dev = netdev_priv(net);
1549         struct phy_device *phydev = net->phydev;
1550         struct ethtool_link_ksettings ecmd;
1551
1552         phy_ethtool_ksettings_get(phydev, &ecmd);
1553
1554         pause->autoneg = dev->fc_autoneg;
1555
1556         if (dev->fc_request_control & FLOW_CTRL_TX)
1557                 pause->tx_pause = 1;
1558
1559         if (dev->fc_request_control & FLOW_CTRL_RX)
1560                 pause->rx_pause = 1;
1561 }
1562
/* ethtool set_pauseparam: record the requested pause configuration and,
 * when link autonegotiation is active, rewrite the PHY's Pause/Asym-Pause
 * advertisement bits and restart negotiation.
 *
 * Requesting pause autoneg while link autoneg is disabled is rejected
 * with -EINVAL (pause cannot be negotiated without link autoneg).
 *
 * Returns 0 or -EINVAL.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	/* rebuild the requested-pause bitmap from scratch */
	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		/* replace only the Pause/Asym-Pause advertisement bits */
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		/* push the updated advertisement to the PHY */
		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1608
/* ethtool entry points for the LAN78xx.  Link management is largely
 * delegated to phylib via the net_device's attached PHY; EEPROM/OTP and
 * Wake-on-LAN operations go through the driver's own register helpers.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1630
1631 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1632 {
1633         if (!netif_running(netdev))
1634                 return -EINVAL;
1635
1636         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1637 }
1638
/* lan78xx_init_mac_address - establish the device's MAC address.
 *
 * Reads the current address from RX_ADDRL/RX_ADDRH.  If it is not a valid
 * unicast address, the fallbacks are tried in order: platform/Device Tree,
 * then EEPROM, then OTP, and finally a random locally-administered
 * address.  The chosen address is written back to the RX address
 * registers (when it changed), programmed into perfect-filter slot 0, and
 * copied into the net_device.
 *
 * NOTE(review): register-access return codes in @ret are not checked.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack: bytes 0-3 from ADDRL (LSB first), bytes 4-5 from ADDRH */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* repack and persist the chosen address in the MAC */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* perfect-filter slot 0 always holds our own address */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1688
1689 /* MDIO read and write wrappers for phylib */
1690 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1691 {
1692         struct lan78xx_net *dev = bus->priv;
1693         u32 val, addr;
1694         int ret;
1695
1696         ret = usb_autopm_get_interface(dev->intf);
1697         if (ret < 0)
1698                 return ret;
1699
1700         mutex_lock(&dev->phy_mutex);
1701
1702         /* confirm MII not busy */
1703         ret = lan78xx_phy_wait_not_busy(dev);
1704         if (ret < 0)
1705                 goto done;
1706
1707         /* set the address, index & direction (read from PHY) */
1708         addr = mii_access(phy_id, idx, MII_READ);
1709         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1710
1711         ret = lan78xx_phy_wait_not_busy(dev);
1712         if (ret < 0)
1713                 goto done;
1714
1715         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1716
1717         ret = (int)(val & 0xFFFF);
1718
1719 done:
1720         mutex_unlock(&dev->phy_mutex);
1721         usb_autopm_put_interface(dev->intf);
1722
1723         return ret;
1724 }
1725
1726 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1727                                  u16 regval)
1728 {
1729         struct lan78xx_net *dev = bus->priv;
1730         u32 val, addr;
1731         int ret;
1732
1733         ret = usb_autopm_get_interface(dev->intf);
1734         if (ret < 0)
1735                 return ret;
1736
1737         mutex_lock(&dev->phy_mutex);
1738
1739         /* confirm MII not busy */
1740         ret = lan78xx_phy_wait_not_busy(dev);
1741         if (ret < 0)
1742                 goto done;
1743
1744         val = (u32)regval;
1745         ret = lan78xx_write_reg(dev, MII_DATA, val);
1746
1747         /* set the address, index & direction (write to PHY) */
1748         addr = mii_access(phy_id, idx, MII_WRITE);
1749         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1750
1751         ret = lan78xx_phy_wait_not_busy(dev);
1752         if (ret < 0)
1753                 goto done;
1754
1755 done:
1756         mutex_unlock(&dev->phy_mutex);
1757         usb_autopm_put_interface(dev->intf);
1758         return 0;
1759 }
1760
1761 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1762 {
1763         int ret;
1764
1765         dev->mdiobus = mdiobus_alloc();
1766         if (!dev->mdiobus) {
1767                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1768                 return -ENOMEM;
1769         }
1770
1771         dev->mdiobus->priv = (void *)dev;
1772         dev->mdiobus->read = lan78xx_mdiobus_read;
1773         dev->mdiobus->write = lan78xx_mdiobus_write;
1774         dev->mdiobus->name = "lan78xx-mdiobus";
1775
1776         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1777                  dev->udev->bus->busnum, dev->udev->devnum);
1778
1779         switch (dev->chipid) {
1780         case ID_REV_CHIP_ID_7800_:
1781         case ID_REV_CHIP_ID_7850_:
1782                 /* set to internal PHY id */
1783                 dev->mdiobus->phy_mask = ~(1 << 1);
1784                 break;
1785         case ID_REV_CHIP_ID_7801_:
1786                 /* scan thru PHYAD[2..0] */
1787                 dev->mdiobus->phy_mask = ~(0xFF);
1788                 break;
1789         }
1790
1791         ret = mdiobus_register(dev->mdiobus);
1792         if (ret) {
1793                 netdev_err(dev->net, "can't register MDIO bus\n");
1794                 goto exit1;
1795         }
1796
1797         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1798         return 0;
1799 exit1:
1800         mdiobus_free(dev->mdiobus);
1801         return ret;
1802 }
1803
1804 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1805 {
1806         mdiobus_unregister(dev->mdiobus);
1807         mdiobus_free(dev->mdiobus);
1808 }
1809
/* phylib link-change callback (registered via phy_connect_direct()).
 *
 * Only acts in forced-100 mode; autonegotiated links need no help.  The
 * sequence below is order-critical: PHY interrupts are masked first so
 * the speed bouncing does not raise spurious events, and any interrupt
 * latched during the workaround is cleared (read of LAN88XX_INT_STS)
 * before re-enabling.  phy_read()/phy_write() return codes are knowingly
 * ignored; there is no recovery path from a callback anyway.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* clearing both speed bits in BMCR selects 10 Mbps */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1841
1842 static int irq_map(struct irq_domain *d, unsigned int irq,
1843                    irq_hw_number_t hwirq)
1844 {
1845         struct irq_domain_data *data = d->host_data;
1846
1847         irq_set_chip_data(irq, data);
1848         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1849         irq_set_noprobe(irq);
1850
1851         return 0;
1852 }
1853
/* irq_domain .unmap callback: detach chip/handler and chip data from a
 * virq being torn down (reverse of irq_map()).
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1859
/* irq_domain ops for the device's interrupt-EP sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1864
1865 static void lan78xx_irq_mask(struct irq_data *irqd)
1866 {
1867         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1868
1869         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1870 }
1871
1872 static void lan78xx_irq_unmask(struct irq_data *irqd)
1873 {
1874         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1875
1876         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1877 }
1878
/* irqchip .irq_bus_lock: serialize a mask/unmask transaction.  Released
 * (and the cached mask flushed to hardware) in irq_bus_sync_unlock().
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1885
1886 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1887 {
1888         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1889         struct lan78xx_net *dev =
1890                         container_of(data, struct lan78xx_net, domain_data);
1891         u32 buf;
1892         int ret;
1893
1894         /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1895          * are only two callbacks executed in non-atomic contex.
1896          */
1897         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1898         if (buf != data->irqenable)
1899                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1900
1901         mutex_unlock(&data->irq_lock);
1902 }
1903
/* Slow-bus irqchip: mask/unmask only touch the cached enable word; the
 * bus_lock/bus_sync_unlock pair performs the actual (sleeping) USB
 * register update.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1911
1912 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1913 {
1914         struct device_node *of_node;
1915         struct irq_domain *irqdomain;
1916         unsigned int irqmap = 0;
1917         u32 buf;
1918         int ret = 0;
1919
1920         of_node = dev->udev->dev.parent->of_node;
1921
1922         mutex_init(&dev->domain_data.irq_lock);
1923
1924         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1925         dev->domain_data.irqenable = buf;
1926
1927         dev->domain_data.irqchip = &lan78xx_irqchip;
1928         dev->domain_data.irq_handler = handle_simple_irq;
1929
1930         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1931                                           &chip_domain_ops, &dev->domain_data);
1932         if (irqdomain) {
1933                 /* create mapping for PHY interrupt */
1934                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1935                 if (!irqmap) {
1936                         irq_domain_remove(irqdomain);
1937
1938                         irqdomain = NULL;
1939                         ret = -EINVAL;
1940                 }
1941         } else {
1942                 ret = -EINVAL;
1943         }
1944
1945         dev->domain_data.irqdomain = irqdomain;
1946         dev->domain_data.phyirq = irqmap;
1947
1948         return ret;
1949 }
1950
1951 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1952 {
1953         if (dev->domain_data.phyirq > 0) {
1954                 irq_dispose_mapping(dev->domain_data.phyirq);
1955
1956                 if (dev->domain_data.irqdomain)
1957                         irq_domain_remove(dev->domain_data.irqdomain);
1958         }
1959         dev->domain_data.phyirq = 0;
1960         dev->domain_data.irqdomain = NULL;
1961 }
1962
1963 static int lan8835_fixup(struct phy_device *phydev)
1964 {
1965         int buf;
1966         int ret;
1967         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1968
1969         /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1970         buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1971         buf &= ~0x1800;
1972         buf |= 0x0800;
1973         phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1974
1975         /* RGMII MAC TXC Delay Enable */
1976         ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1977                                 MAC_RGMII_ID_TXC_DELAY_EN_);
1978
1979         /* RGMII TX DLL Tune Adjust */
1980         ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1981
1982         dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1983
1984         return 1;
1985 }
1986
1987 static int ksz9031rnx_fixup(struct phy_device *phydev)
1988 {
1989         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1990
1991         /* Micrel9301RNX PHY configuration */
1992         /* RGMII Control Signal Pad Skew */
1993         phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1994         /* RGMII RX Data Pad Skew */
1995         phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1996         /* RGMII RX Clock Pad Skew */
1997         phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
1998
1999         dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2000
2001         return 1;
2002 }
2003
2004 static int lan78xx_phy_init(struct lan78xx_net *dev)
2005 {
2006         int ret;
2007         u32 mii_adv;
2008         struct phy_device *phydev;
2009
2010         phydev = phy_find_first(dev->mdiobus);
2011         if (!phydev) {
2012                 netdev_err(dev->net, "no PHY found\n");
2013                 return -EIO;
2014         }
2015
2016         if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2017             (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2018                 phydev->is_internal = true;
2019                 dev->interface = PHY_INTERFACE_MODE_GMII;
2020
2021         } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2022                 if (!phydev->drv) {
2023                         netdev_err(dev->net, "no PHY driver found\n");
2024                         return -EIO;
2025                 }
2026
2027                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2028
2029                 /* external PHY fixup for KSZ9031RNX */
2030                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2031                                                  ksz9031rnx_fixup);
2032                 if (ret < 0) {
2033                         netdev_err(dev->net, "fail to register fixup\n");
2034                         return ret;
2035                 }
2036                 /* external PHY fixup for LAN8835 */
2037                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2038                                                  lan8835_fixup);
2039                 if (ret < 0) {
2040                         netdev_err(dev->net, "fail to register fixup\n");
2041                         return ret;
2042                 }
2043                 /* add more external PHY fixup here if needed */
2044
2045                 phydev->is_internal = false;
2046         } else {
2047                 netdev_err(dev->net, "unknown ID found\n");
2048                 ret = -EIO;
2049                 goto error;
2050         }
2051
2052         /* if phyirq is not set, use polling mode in phylib */
2053         if (dev->domain_data.phyirq > 0)
2054                 phydev->irq = dev->domain_data.phyirq;
2055         else
2056                 phydev->irq = 0;
2057         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2058
2059         /* set to AUTOMDIX */
2060         phydev->mdix = ETH_TP_MDI_AUTO;
2061
2062         ret = phy_connect_direct(dev->net, phydev,
2063                                  lan78xx_link_status_change,
2064                                  dev->interface);
2065         if (ret) {
2066                 netdev_err(dev->net, "can't attach PHY to %s\n",
2067                            dev->mdiobus->id);
2068                 return -EIO;
2069         }
2070
2071         /* MAC doesn't support 1000T Half */
2072         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2073
2074         /* support both flow controls */
2075         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2076         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2077         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2078         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2079
2080         genphy_config_aneg(phydev);
2081
2082         dev->fc_autoneg = phydev->autoneg;
2083
2084         return 0;
2085
2086 error:
2087         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2088         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2089
2090         return ret;
2091 }
2092
2093 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2094 {
2095         int ret = 0;
2096         u32 buf;
2097         bool rxenabled;
2098
2099         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2100
2101         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2102
2103         if (rxenabled) {
2104                 buf &= ~MAC_RX_RXEN_;
2105                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2106         }
2107
2108         /* add 4 to size for FCS */
2109         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2110         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2111
2112         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2113
2114         if (rxenabled) {
2115                 buf |= MAC_RX_RXEN_;
2116                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2117         }
2118
2119         return 0;
2120 }
2121
/* Asynchronously unlink every URB on queue `q` that is not already being
 * unlinked.  Returns the number of unlinks successfully issued.
 *
 * The queue lock must be dropped around usb_unlink_urb() (its completion
 * handler takes the same lock), so the walk restarts from the head after
 * every unlink — hence the skb_queue_walk + `found` pattern rather than a
 * simple iteration.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2166
2167 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2168 {
2169         struct lan78xx_net *dev = netdev_priv(netdev);
2170         int ll_mtu = new_mtu + netdev->hard_header_len;
2171         int old_hard_mtu = dev->hard_mtu;
2172         int old_rx_urb_size = dev->rx_urb_size;
2173         int ret;
2174
2175         /* no second zero-length packet read wanted after mtu-sized packets */
2176         if ((ll_mtu % dev->maxpacket) == 0)
2177                 return -EDOM;
2178
2179         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2180
2181         netdev->mtu = new_mtu;
2182
2183         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2184         if (dev->rx_urb_size == old_hard_mtu) {
2185                 dev->rx_urb_size = dev->hard_mtu;
2186                 if (dev->rx_urb_size > old_rx_urb_size) {
2187                         if (netif_running(dev->net)) {
2188                                 unlink_urbs(dev, &dev->rxq);
2189                                 tasklet_schedule(&dev->bh);
2190                         }
2191                 }
2192         }
2193
2194         return 0;
2195 }
2196
2197 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2198 {
2199         struct lan78xx_net *dev = netdev_priv(netdev);
2200         struct sockaddr *addr = p;
2201         u32 addr_lo, addr_hi;
2202         int ret;
2203
2204         if (netif_running(netdev))
2205                 return -EBUSY;
2206
2207         if (!is_valid_ether_addr(addr->sa_data))
2208                 return -EADDRNOTAVAIL;
2209
2210         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2211
2212         addr_lo = netdev->dev_addr[0] |
2213                   netdev->dev_addr[1] << 8 |
2214                   netdev->dev_addr[2] << 16 |
2215                   netdev->dev_addr[3] << 24;
2216         addr_hi = netdev->dev_addr[4] |
2217                   netdev->dev_addr[5] << 8;
2218
2219         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2220         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2221
2222         return 0;
2223 }
2224
2225 /* Enable or disable Rx checksum offload engine */
2226 static int lan78xx_set_features(struct net_device *netdev,
2227                                 netdev_features_t features)
2228 {
2229         struct lan78xx_net *dev = netdev_priv(netdev);
2230         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2231         unsigned long flags;
2232         int ret;
2233
2234         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2235
2236         if (features & NETIF_F_RXCSUM) {
2237                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2238                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2239         } else {
2240                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2241                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2242         }
2243
2244         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2245                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2246         else
2247                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2248
2249         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2250
2251         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2252
2253         return 0;
2254 }
2255
2256 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2257 {
2258         struct lan78xx_priv *pdata =
2259                         container_of(param, struct lan78xx_priv, set_vlan);
2260         struct lan78xx_net *dev = pdata->dev;
2261
2262         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2263                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2264 }
2265
2266 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2267                                    __be16 proto, u16 vid)
2268 {
2269         struct lan78xx_net *dev = netdev_priv(netdev);
2270         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2271         u16 vid_bit_index;
2272         u16 vid_dword_index;
2273
2274         vid_dword_index = (vid >> 5) & 0x7F;
2275         vid_bit_index = vid & 0x1F;
2276
2277         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2278
2279         /* defer register writes to a sleepable context */
2280         schedule_work(&pdata->set_vlan);
2281
2282         return 0;
2283 }
2284
2285 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2286                                     __be16 proto, u16 vid)
2287 {
2288         struct lan78xx_net *dev = netdev_priv(netdev);
2289         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2290         u16 vid_bit_index;
2291         u16 vid_dword_index;
2292
2293         vid_dword_index = (vid >> 5) & 0x7F;
2294         vid_bit_index = vid & 0x1F;
2295
2296         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2297
2298         /* defer register writes to a sleepable context */
2299         schedule_work(&pdata->set_vlan);
2300
2301         return 0;
2302 }
2303
/* Initialize the USB LTM (Latency Tolerance Messaging) registers.
 *
 * If LTM is enabled in USB_CFG1, try to load the six LTM register values
 * from EEPROM (falling back to OTP).  EEPROM word 0x3F appears to hold a
 * {length, pointer} pair: a length of 24 bytes selects a valid LTM block
 * at byte offset temp[1]*2 — NOTE(review): layout inferred from this
 * code; confirm against the LAN78xx datasheet's EEPROM map.  On any
 * failure (or LTM disabled) the registers are programmed with zeros.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2342
/* Bring the chip to a known-good state: lite reset, MAC address, chip ID
 * capture, USB/LTM configuration, burst/queue sizing, FIFO limits,
 * offload setup, then PHY reset and datapath (TX/RX) enable.
 *
 * The register sequence below is order-critical.  Per this driver's
 * init-path convention, most register-access return codes are knowingly
 * ignored; only the two reset-completion polls can fail (-EIO).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* issue a "lite" reset and poll (up to 1s) for the bit to clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* reprogram the station address lost across the reset */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* burst cap and queue depths scale with USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear all interrupt status and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and poll (up to 1s) for reset-done plus device-ready */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable MAC TX, then the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC RX, then the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2480
/* Seed the per-counter rollover thresholds used by the stats machinery.
 *
 * Every field of rollover_max defaults to the 20-bit counter maximum
 * (0xFFFFF); the byte-count and EEE LPI counters below are full 32-bit
 * and are overridden afterwards.  Finally a stats update is requested.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	/* rollover_max is treated as a flat array of u32 fields here */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	/* these counters are 32 bits wide in hardware */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	/* kick off the first statistics refresh */
	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
2506
2507 static int lan78xx_open(struct net_device *net)
2508 {
2509         struct lan78xx_net *dev = netdev_priv(net);
2510         int ret;
2511
2512         ret = usb_autopm_get_interface(dev->intf);
2513         if (ret < 0)
2514                 goto out;
2515
2516         phy_start(net->phydev);
2517
2518         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2519
2520         /* for Link Check */
2521         if (dev->urb_intr) {
2522                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2523                 if (ret < 0) {
2524                         netif_err(dev, ifup, dev->net,
2525                                   "intr submit %d\n", ret);
2526                         goto done;
2527                 }
2528         }
2529
2530         lan78xx_init_stats(dev);
2531
2532         set_bit(EVENT_DEV_OPEN, &dev->flags);
2533
2534         netif_start_queue(net);
2535
2536         dev->link_on = false;
2537
2538         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2539 done:
2540         usb_autopm_put_interface(dev->intf);
2541
2542 out:
2543         return ret;
2544 }
2545
/* Unlink all in-flight RX/TX URBs and wait for their completions to
 * drain.  Runs in process context from lan78xx_stop().
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;	/* completion paths wake this head */
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): the &&-chain exits as soon as ANY of the three
	 * queues is empty; if the intent was to wait until ALL are
	 * drained, these look like they should be || — confirm against
	 * the equivalent usbnet teardown path.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2571
/* ndo_stop: quiesce the device — stop the stats timer and PHY, drain
 * all in-flight URBs, then neuter the deferred work paths.  Ordering
 * matters throughout; see inline comments.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* unlink and wait out the RX/TX URB completions */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* frames parked while EVENT_RX_PAUSED was set are discarded */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* NOTE(review): lan78xx_open() already releases its autopm
	 * reference on success, so this put looks unbalanced — verify
	 * the runtime-PM usage-count pairing.
	 */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2608
/* Make the skb's payload contiguous so it can be handed to the
 * bulk-out URB as one flat buffer.  Returns 0 on success or the
 * negative errno from skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2613
2614 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2615                                        struct sk_buff *skb, gfp_t flags)
2616 {
2617         u32 tx_cmd_a, tx_cmd_b;
2618
2619         if (skb_cow_head(skb, TX_OVERHEAD)) {
2620                 dev_kfree_skb_any(skb);
2621                 return NULL;
2622         }
2623
2624         if (lan78xx_linearize(skb) < 0)
2625                 return NULL;
2626
2627         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2628
2629         if (skb->ip_summed == CHECKSUM_PARTIAL)
2630                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2631
2632         tx_cmd_b = 0;
2633         if (skb_is_gso(skb)) {
2634                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2635
2636                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2637
2638                 tx_cmd_a |= TX_CMD_A_LSO_;
2639         }
2640
2641         if (skb_vlan_tag_present(skb)) {
2642                 tx_cmd_a |= TX_CMD_A_IVTG_;
2643                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2644         }
2645
2646         skb_push(skb, 4);
2647         cpu_to_le32s(&tx_cmd_b);
2648         memcpy(skb->data, &tx_cmd_b, 4);
2649
2650         skb_push(skb, 4);
2651         cpu_to_le32s(&tx_cmd_a);
2652         memcpy(skb->data, &tx_cmd_a, 4);
2653
2654         return skb;
2655 }
2656
/* Move @skb from @list to dev->done, recording @state in its control
 * block, and schedule the bottom-half tasklet when dev->done
 * transitions from empty.  Returns the skb's previous state so
 * rx_complete() can detect a racing unlink (unlink_start).
 *
 * Runs in URB-completion (interrupt) context.  Interrupts stay
 * disabled across both queue locks: saved with the first lock,
 * restored with the second.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);	/* irqs intentionally still off */
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2679
/* Bulk-out URB completion handler (interrupt context).  Updates TX
 * statistics, schedules recovery for a halted endpoint, releases the
 * autopm reference taken at submit time in lan78xx_tx_bh(), and defers
 * skb/urb teardown to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* num_of_packet/length were filled in by lan78xx_tx_bh() */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear the halt from process
			 * context via the deferred work
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* low-level USB protocol errors: stop feeding
			 * the pipe
			 */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* pairs with usb_autopm_get_interface_async() at submit */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2718
2719 static void lan78xx_queue_skb(struct sk_buff_head *list,
2720                               struct sk_buff *newsk, enum skb_state state)
2721 {
2722         struct skb_data *entry = (struct skb_data *)newsk->cb;
2723
2724         __skb_queue_tail(list, newsk);
2725         entry->state = state;
2726 }
2727
2728 static netdev_tx_t
2729 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2730 {
2731         struct lan78xx_net *dev = netdev_priv(net);
2732         struct sk_buff *skb2 = NULL;
2733
2734         if (skb) {
2735                 skb_tx_timestamp(skb);
2736                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2737         }
2738
2739         if (skb2) {
2740                 skb_queue_tail(&dev->txq_pend, skb2);
2741
2742                 /* throttle TX patch at slower than SUPER SPEED USB */
2743                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2744                     (skb_queue_len(&dev->txq_pend) > 10))
2745                         netif_stop_queue(net);
2746         } else {
2747                 netif_dbg(dev, tx_err, dev->net,
2748                           "lan78xx_tx_prep return NULL\n");
2749                 dev->net->stats.tx_errors++;
2750                 dev->net->stats.tx_dropped++;
2751         }
2752
2753         tasklet_schedule(&dev->bh);
2754
2755         return NETDEV_TX_OK;
2756 }
2757
/* Scan the interface's altsettings for one bulk-in, one bulk-out and
 * (optionally) one interrupt-in endpoint.  On success stores the data
 * pipes in dev->pipe_in/pipe_out and the status endpoint in
 * dev->ep_intr, returning 0; returns -EINVAL when no altsetting
 * provides both data pipes.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints are usable */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* skip control/isochronous endpoints */
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* first altsetting with both data pipes wins */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2816
2817 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2818 {
2819         struct lan78xx_priv *pdata = NULL;
2820         int ret;
2821         int i;
2822
2823         ret = lan78xx_get_endpoints(dev, intf);
2824
2825         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2826
2827         pdata = (struct lan78xx_priv *)(dev->data[0]);
2828         if (!pdata) {
2829                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2830                 return -ENOMEM;
2831         }
2832
2833         pdata->dev = dev;
2834
2835         spin_lock_init(&pdata->rfe_ctl_lock);
2836         mutex_init(&pdata->dataport_mutex);
2837
2838         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2839
2840         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2841                 pdata->vlan_table[i] = 0;
2842
2843         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2844
2845         dev->net->features = 0;
2846
2847         if (DEFAULT_TX_CSUM_ENABLE)
2848                 dev->net->features |= NETIF_F_HW_CSUM;
2849
2850         if (DEFAULT_RX_CSUM_ENABLE)
2851                 dev->net->features |= NETIF_F_RXCSUM;
2852
2853         if (DEFAULT_TSO_CSUM_ENABLE)
2854                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2855
2856         dev->net->hw_features = dev->net->features;
2857
2858         ret = lan78xx_setup_irq_domain(dev);
2859         if (ret < 0) {
2860                 netdev_warn(dev->net,
2861                             "lan78xx_setup_irq_domain() failed : %d", ret);
2862                 goto out1;
2863         }
2864
2865         dev->net->hard_header_len += TX_OVERHEAD;
2866         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2867
2868         /* Init all registers */
2869         ret = lan78xx_reset(dev);
2870         if (ret) {
2871                 netdev_warn(dev->net, "Registers INIT FAILED....");
2872                 goto out2;
2873         }
2874
2875         ret = lan78xx_mdio_init(dev);
2876         if (ret) {
2877                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2878                 goto out2;
2879         }
2880
2881         dev->net->flags |= IFF_MULTICAST;
2882
2883         pdata->wol = WAKE_MAGIC;
2884
2885         return ret;
2886
2887 out2:
2888         lan78xx_remove_irq_domain(dev);
2889
2890 out1:
2891         netdev_warn(dev->net, "Bind routine FAILED");
2892         cancel_work_sync(&pdata->set_multicast);
2893         cancel_work_sync(&pdata->set_vlan);
2894         kfree(pdata);
2895         return ret;
2896 }
2897
2898 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2899 {
2900         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2901
2902         lan78xx_remove_irq_domain(dev);
2903
2904         lan78xx_remove_mdio(dev);
2905
2906         if (pdata) {
2907                 cancel_work_sync(&pdata->set_multicast);
2908                 cancel_work_sync(&pdata->set_vlan);
2909                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2910                 kfree(pdata);
2911                 pdata = NULL;
2912                 dev->data[0] = 0;
2913         }
2914 }
2915
2916 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2917                                     struct sk_buff *skb,
2918                                     u32 rx_cmd_a, u32 rx_cmd_b)
2919 {
2920         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2921             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2922                 skb->ip_summed = CHECKSUM_NONE;
2923         } else {
2924                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2925                 skb->ip_summed = CHECKSUM_COMPLETE;
2926         }
2927 }
2928
/* Deliver one received frame to the network stack, or park it on
 * rxq_pause while reception is paused.  Consumes @skb in all cases.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* stats counted before eth_type_trans() pulls the header */
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* the timestamping layer may take ownership of the skb */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2955
/* Split one bulk-in URB buffer into individual ethernet frames.  Each
 * frame is preceded by little-endian RX command words A/B (32 bit) and
 * C (16 bit) and padded to a 4-byte boundary.  All but the last frame
 * are cloned and handed up via lan78xx_skb_return(); the last frame is
 * trimmed in place and left in @skb for the caller.  Returns 0 on
 * error (runt buffer or clone failure), 1 otherwise.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* memcpy avoids unaligned loads of the command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* the clone shares the buffer; point it at just
			 * this frame's payload
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3027
3028 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3029 {
3030         if (!lan78xx_rx(dev, skb)) {
3031                 dev->net->stats.rx_errors++;
3032                 goto done;
3033         }
3034
3035         if (skb->len) {
3036                 lan78xx_skb_return(dev, skb);
3037                 return;
3038         }
3039
3040         netif_dbg(dev, rx_err, dev->net, "drop\n");
3041         dev->net->stats.rx_errors++;
3042 done:
3043         skb_queue_tail(&dev->done, skb);
3044 }
3045
3046 static void rx_complete(struct urb *urb);
3047
/* Allocate an RX skb for @urb and submit it on the bulk-in pipe.
 * Takes ownership of @urb: it is freed on every failure path.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOLINK when
 * the interface cannot receive right now, or another negative errno
 * from usb_submit_urb().
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock serializes the submit against unlink/halt paths */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover in process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry the refill */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3107
/* Bulk-in URB completion handler (interrupt context).  Classifies the
 * completion status, defers the skb to the bottom half, and — when the
 * URB is still usable — immediately resubmits it for the next receive.
 * Paths that set urb = NULL transfer URB ownership to the bottom half
 * via entry->urb.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt: too short even for the RX command words */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the urb with the skb so the bh frees both */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* old state unlink_start means unlink_urbs() owns this urb */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3177
/* Bottom-half TX path: coalesce pending frames from dev->txq_pend into
 * a single bulk-out buffer (a GSO frame is always sent alone), then
 * allocate and submit the URB.  Runs from the tasklet.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* NOTE(review): this walk of tqp is not under tqp->lock while
	 * lan78xx_start_xmit() appends concurrently with
	 * skb_queue_tail() — confirm whether the scan needs the
	 * queue lock.
	 */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			/* length excludes the 8-byte command header */
			length = skb->len - TX_OVERHEAD;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* frames are 4-byte aligned within the batch buffer */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	/* stash accounting for tx_complete() in the skb's cb */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* PM reference is released by tx_complete() or the error paths */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3302
3303 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3304 {
3305         struct urb *urb;
3306         int i;
3307
3308         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3309                 for (i = 0; i < 10; i++) {
3310                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3311                                 break;
3312                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3313                         if (urb)
3314                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3315                                         return;
3316                 }
3317
3318                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3319                         tasklet_schedule(&dev->bh);
3320         }
3321         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3322                 netif_wake_queue(dev->net);
3323 }
3324
/* Tasklet bottom half: reap completed skbs from dev->done, then, while
 * the interface is up, flush pending TX and refill the RX ring.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* NOTE(review): returning here leaks the dequeued
			 * skb/urb; should be unreachable for valid states.
			 */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* hold off the RX refill while a throttle delay or halt
		 * recovery is pending
		 */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3368
3369 static void lan78xx_delayedwork(struct work_struct *work)
3370 {
3371         int status;
3372         struct lan78xx_net *dev;
3373
3374         dev = container_of(work, struct lan78xx_net, wq.work);
3375
3376         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3377                 unlink_urbs(dev, &dev->txq);
3378                 status = usb_autopm_get_interface(dev->intf);
3379                 if (status < 0)
3380                         goto fail_pipe;
3381                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3382                 usb_autopm_put_interface(dev->intf);
3383                 if (status < 0 &&
3384                     status != -EPIPE &&
3385                     status != -ESHUTDOWN) {
3386                         if (netif_msg_tx_err(dev))
3387 fail_pipe:
3388                                 netdev_err(dev->net,
3389                                            "can't clear tx halt, status %d\n",
3390                                            status);
3391                 } else {
3392                         clear_bit(EVENT_TX_HALT, &dev->flags);
3393                         if (status != -ESHUTDOWN)
3394                                 netif_wake_queue(dev->net);
3395                 }
3396         }
3397         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3398                 unlink_urbs(dev, &dev->rxq);
3399                 status = usb_autopm_get_interface(dev->intf);
3400                 if (status < 0)
3401                                 goto fail_halt;
3402                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3403                 usb_autopm_put_interface(dev->intf);
3404                 if (status < 0 &&
3405                     status != -EPIPE &&
3406                     status != -ESHUTDOWN) {
3407                         if (netif_msg_rx_err(dev))
3408 fail_halt:
3409                                 netdev_err(dev->net,
3410                                            "can't clear rx halt, status %d\n",
3411                                            status);
3412                 } else {
3413                         clear_bit(EVENT_RX_HALT, &dev->flags);
3414                         tasklet_schedule(&dev->bh);
3415                 }
3416         }
3417
3418         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3419                 int ret = 0;
3420
3421                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3422                 status = usb_autopm_get_interface(dev->intf);
3423                 if (status < 0)
3424                         goto skip_reset;
3425                 if (lan78xx_link_reset(dev) < 0) {
3426                         usb_autopm_put_interface(dev->intf);
3427 skip_reset:
3428                         netdev_info(dev->net, "link reset failed (%d)\n",
3429                                     ret);
3430                 } else {
3431                         usb_autopm_put_interface(dev->intf);
3432                 }
3433         }
3434
3435         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3436                 lan78xx_update_stats(dev);
3437
3438                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3439
3440                 mod_timer(&dev->stat_monitor,
3441                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3442
3443                 dev->delta = min((dev->delta * 2), 50);
3444         }
3445 }
3446
3447 static void intr_complete(struct urb *urb)
3448 {
3449         struct lan78xx_net *dev = urb->context;
3450         int status = urb->status;
3451
3452         switch (status) {
3453         /* success */
3454         case 0:
3455                 lan78xx_status(dev, urb);
3456                 break;
3457
3458         /* software-driven interface shutdown */
3459         case -ENOENT:                   /* urb killed */
3460         case -ESHUTDOWN:                /* hardware gone */
3461                 netif_dbg(dev, ifdown, dev->net,
3462                           "intr shutdown, code %d\n", status);
3463                 return;
3464
3465         /* NOTE:  not throttling like RX/TX, since this endpoint
3466          * already polls infrequently
3467          */
3468         default:
3469                 netdev_dbg(dev->net, "intr status %d\n", status);
3470                 break;
3471         }
3472
3473         if (!netif_running(dev->net))
3474                 return;
3475
3476         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3477         status = usb_submit_urb(urb, GFP_ATOMIC);
3478         if (status != 0)
3479                 netif_err(dev, timer, dev->net,
3480                           "intr resubmit --> %d\n", status);
3481 }
3482
/* USB disconnect callback: tears down the PHY connection, unregisters the
 * net device, cancels deferred work and URBs, then drops the USB device
 * reference taken in lan78xx_probe().  The teardown order below matters.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	/* remove the PHY fixups installed for these PHY IDs at setup time */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	unregister_netdev(net);

	/* ensure lan78xx_delayedwork() is not running before unbinding */
	cancel_delayed_work_sync(&dev->wq);

	/* discard any TX URBs deferred during autosuspend */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	/* NOTE(review): the interrupt URB is only killed after unbind;
	 * confirm intr_complete() cannot race with the teardown above.
	 */
	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3516
3517 static void lan78xx_tx_timeout(struct net_device *net)
3518 {
3519         struct lan78xx_net *dev = netdev_priv(net);
3520
3521         unlink_urbs(dev, &dev->txq);
3522         tasklet_schedule(&dev->bh);
3523 }
3524
/* net_device operations exposed to the networking core */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3539
3540 static void lan78xx_stat_monitor(struct timer_list *t)
3541 {
3542         struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3543
3544         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3545 }
3546
3547 static int lan78xx_probe(struct usb_interface *intf,
3548                          const struct usb_device_id *id)
3549 {
3550         struct lan78xx_net *dev;
3551         struct net_device *netdev;
3552         struct usb_device *udev;
3553         int ret;
3554         unsigned maxp;
3555         unsigned period;
3556         u8 *buf = NULL;
3557
3558         udev = interface_to_usbdev(intf);
3559         udev = usb_get_dev(udev);
3560
3561         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3562         if (!netdev) {
3563                 dev_err(&intf->dev, "Error: OOM\n");
3564                 ret = -ENOMEM;
3565                 goto out1;
3566         }
3567
3568         /* netdev_printk() needs this */
3569         SET_NETDEV_DEV(netdev, &intf->dev);
3570
3571         dev = netdev_priv(netdev);
3572         dev->udev = udev;
3573         dev->intf = intf;
3574         dev->net = netdev;
3575         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3576                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3577
3578         skb_queue_head_init(&dev->rxq);
3579         skb_queue_head_init(&dev->txq);
3580         skb_queue_head_init(&dev->done);
3581         skb_queue_head_init(&dev->rxq_pause);
3582         skb_queue_head_init(&dev->txq_pend);
3583         mutex_init(&dev->phy_mutex);
3584
3585         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3586         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3587         init_usb_anchor(&dev->deferred);
3588
3589         netdev->netdev_ops = &lan78xx_netdev_ops;
3590         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3591         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3592
3593         dev->delta = 1;
3594         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3595
3596         mutex_init(&dev->stats.access_lock);
3597
3598         ret = lan78xx_bind(dev, intf);
3599         if (ret < 0)
3600                 goto out2;
3601         strcpy(netdev->name, "eth%d");
3602
3603         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3604                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3605
3606         /* MTU range: 68 - 9000 */
3607         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3608
3609         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3610         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3611         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3612
3613         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3614         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3615
3616         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3617                                         dev->ep_intr->desc.bEndpointAddress &
3618                                         USB_ENDPOINT_NUMBER_MASK);
3619         period = dev->ep_intr->desc.bInterval;
3620
3621         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3622         buf = kmalloc(maxp, GFP_KERNEL);
3623         if (buf) {
3624                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3625                 if (!dev->urb_intr) {
3626                         ret = -ENOMEM;
3627                         kfree(buf);
3628                         goto out3;
3629                 } else {
3630                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3631                                          dev->pipe_intr, buf, maxp,
3632                                          intr_complete, dev, period);
3633                 }
3634         }
3635
3636         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3637
3638         /* driver requires remote-wakeup capability during autosuspend. */
3639         intf->needs_remote_wakeup = 1;
3640
3641         ret = register_netdev(netdev);
3642         if (ret != 0) {
3643                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3644                 goto out3;
3645         }
3646
3647         usb_set_intfdata(intf, dev);
3648
3649         ret = device_set_wakeup_enable(&udev->dev, true);
3650
3651          /* Default delay of 2sec has more overhead than advantage.
3652           * Set to 10sec as default.
3653           */
3654         pm_runtime_set_autosuspend_delay(&udev->dev,
3655                                          DEFAULT_AUTOSUSPEND_DELAY);
3656
3657         ret = lan78xx_phy_init(dev);
3658         if (ret < 0)
3659                 goto out4;
3660
3661         return 0;
3662
3663 out4:
3664         unregister_netdev(netdev);
3665 out3:
3666         lan78xx_unbind(dev, intf);
3667 out2:
3668         free_netdev(netdev);
3669 out1:
3670         usb_put_dev(udev);
3671
3672         return ret;
3673 }
3674
3675 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3676 {
3677         const u16 crc16poly = 0x8005;
3678         int i;
3679         u16 bit, crc, msb;
3680         u8 data;
3681
3682         crc = 0xFFFF;
3683         for (i = 0; i < len; i++) {
3684                 data = *buf++;
3685                 for (bit = 0; bit < 8; bit++) {
3686                         msb = crc >> 15;
3687                         crc <<= 1;
3688
3689                         if (msb ^ (u16)(data & 1)) {
3690                                 crc ^= crc16poly;
3691                                 crc |= (u16)0x0001U;
3692                         }
3693                         data >>= 1;
3694                 }
3695         }
3696
3697         return crc;
3698 }
3699
/* Program the chip's Wake-on-LAN machinery for system suspend according to
 * the @wol bitmask (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP): disables the
 * MAC, clears stale wake status, builds the wakeup-frame filters, then
 * selects the suspend mode and re-enables the receiver so wake frames can
 * be seen.  Always returns 0.
 *
 * NOTE(review): every lan78xx_read_reg()/lan78xx_write_reg() return value
 * is assigned to 'ret' and ignored; register I/O failures are silently
 * dropped here.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* match prefixes: 01:00:5e = IPv4 mcast MAC, 33:33 = IPv6 mcast MAC,
	 * 08:06 = ARP ethertype
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX and RX while reconfiguring wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake control/status and any recorded wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start from a clean slate of wakeup-frame filters */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		/* magic packet uses the deeper suspend mode 3 */
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7 = compare the first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3 = compare the first 2 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000 = compare bytes 12-13 (the ethertype) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames are detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3842
/* USB suspend callback.  On the first (outermost) suspend: refuse an
 * autosuspend while TX is pending, quiesce the MAC and terminate all URBs.
 * Then arm the hardware either for selective (auto) suspend with
 * good-frame/PHY wakeup, or for system suspend with the user's WoL
 * settings via lan78xx_set_suspend().
 *
 * NOTE(review): register read/write return values are assigned to 'ret'
 * and ignored throughout the PM sequence.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	/* NOTE(review): 'event' is assigned below but never used */
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no statistics polling while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake state before re-arming it */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any latched wake-up status bits */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* receiver stays on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: honor the configured WoL options */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3938
/* USB resume callback: restart the stats timer, resubmit the interrupt
 * URB and any TX URBs deferred while asleep, clear/reprogram the wake
 * registers, and re-enable the transmitter.  Always returns 0.
 *
 * NOTE(review): submission errors for the interrupt URB and the register
 * write return values are ignored here.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart statistics polling if it was stopped during suspend */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* resubmit the TX URBs that were anchored while asleep */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the packet; release the autopm
				 * reference held for this deferred URB
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear suspend-time wake configuration and status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* acknowledge any recorded wake reasons */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4005
4006 static int lan78xx_reset_resume(struct usb_interface *intf)
4007 {
4008         struct lan78xx_net *dev = usb_get_intfdata(intf);
4009
4010         lan78xx_reset(dev);
4011
4012         phy_start(dev->net->phydev);
4013
4014         return lan78xx_resume(intf);
4015 }
4016
/* USB vendor/product IDs handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
4033
/* USB driver registration: probe/disconnect and power-management hooks */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4045
/* module init/exit boilerplate: registers the USB driver above */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");