2 * Ethernet driver for the WIZnet W5100 chip.
4 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
5 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
7 * Licensed under the GPL-2 or later.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/platform_device.h>
15 #include <linux/platform_data/wiznet.h>
16 #include <linux/ethtool.h>
17 #include <linux/skbuff.h>
18 #include <linux/types.h>
19 #include <linux/errno.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
24 #include <linux/ioport.h>
25 #include <linux/interrupt.h>
26 #include <linux/irq.h>
27 #include <linux/gpio.h>
31 #define DRV_NAME "w5100"
32 #define DRV_VERSION "2012-04-04"
34 MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
35 MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
36 MODULE_ALIAS("platform:"DRV_NAME);
37 MODULE_LICENSE("GPL");
/*
 * W5100/W5200/W5500 common registers
 */
#define W5100_COMMON_REGS	0x0000
#define W5100_MR		0x0000 /* Mode Register */
#define   MR_RST		0x80   /* S/W reset */
#define   MR_PB			0x10   /* Ping block */
#define   MR_AI			0x02   /* Address Auto-Increment */
#define   MR_IND		0x01   /* Indirect mode */
#define W5100_SHAR		0x0009 /* Source MAC address */
#define W5100_IR		0x0015 /* Interrupt Register */
#define W5100_COMMON_REGS_LEN	0x0040

/* Per-socket (Sn) register offsets, relative to the socket register base */
#define W5100_Sn_MR		0x0000 /* Sn Mode Register */
#define W5100_Sn_CR		0x0001 /* Sn Command Register */
#define W5100_Sn_IR		0x0002 /* Sn Interrupt Register */
#define W5100_Sn_SR		0x0003 /* Sn Status Register */
#define W5100_Sn_TX_FSR		0x0020 /* Sn Transmit free memory size */
#define W5100_Sn_TX_RD		0x0022 /* Sn Transmit memory read pointer */
#define W5100_Sn_TX_WR		0x0024 /* Sn Transmit memory write pointer */
#define W5100_Sn_RX_RSR		0x0026 /* Sn Receive free memory size */
#define W5100_Sn_RX_RD		0x0028 /* Sn Receive memory read pointer */
/* Socket 0 register accessors; the base differs per chip (see probe). */
#define S0_REGS(priv)		((priv)->s0_regs)

#define W5100_S0_MR(priv)	(S0_REGS(priv) + W5100_Sn_MR)
#define   S0_MR_MACRAW		0x04 /* MAC RAW mode */
#define   S0_MR_MF		0x40 /* MAC Filter for W5100 and W5200 */
#define   W5500_S0_MR_MF	0x80 /* MAC Filter for W5500 */
#define W5100_S0_CR(priv)	(S0_REGS(priv) + W5100_Sn_CR)
#define   S0_CR_OPEN		0x01 /* OPEN command */
#define   S0_CR_CLOSE		0x10 /* CLOSE command */
#define   S0_CR_SEND		0x20 /* SEND command */
#define   S0_CR_RECV		0x40 /* RECV command */
#define W5100_S0_IR(priv)	(S0_REGS(priv) + W5100_Sn_IR)
#define   S0_IR_SENDOK		0x10 /* complete sending */
#define   S0_IR_RECV		0x04 /* receiving data */
#define W5100_S0_SR(priv)	(S0_REGS(priv) + W5100_Sn_SR)
#define   S0_SR_MACRAW		0x42 /* mac raw mode */
#define W5100_S0_TX_FSR(priv)	(S0_REGS(priv) + W5100_Sn_TX_FSR)
#define W5100_S0_TX_RD(priv)	(S0_REGS(priv) + W5100_Sn_TX_RD)
#define W5100_S0_TX_WR(priv)	(S0_REGS(priv) + W5100_Sn_TX_WR)
#define W5100_S0_RX_RSR(priv)	(S0_REGS(priv) + W5100_Sn_RX_RSR)
#define W5100_S0_RX_RD(priv)	(S0_REGS(priv) + W5100_Sn_RX_RD)

#define W5100_S0_REGS_LEN	0x0040
/*
 * W5100 and W5200 common registers
 */
#define W5100_IMR		0x0016 /* Interrupt Mask Register */
#define   IR_S0			0x01   /* S0 interrupt */
#define W5100_RTR		0x0017 /* Retry Time-value Register */
#define   RTR_DEFAULT		2000   /* =0x07d0 (2000) */

/*
 * W5100 specific register and memory
 */
#define W5100_RMSR		0x001a /* Receive Memory Size */
#define W5100_TMSR		0x001b /* Transmit Memory Size */

#define W5100_S0_REGS		0x0400

#define W5100_TX_MEM_START	0x4000
#define W5100_TX_MEM_SIZE	0x2000
#define W5100_RX_MEM_START	0x6000
#define W5100_RX_MEM_SIZE	0x2000

/*
 * W5200 specific register and memory
 */
#define W5200_S0_REGS		0x4000

#define W5200_Sn_RXMEM_SIZE(n)	(0x401e + (n) * 0x0100) /* Sn RX Memory Size */
#define W5200_Sn_TXMEM_SIZE(n)	(0x401f + (n) * 0x0100) /* Sn TX Memory Size */

#define W5200_TX_MEM_START	0x8000
#define W5200_TX_MEM_SIZE	0x4000
#define W5200_RX_MEM_START	0xc000
#define W5200_RX_MEM_SIZE	0x4000

/*
 * W5500 specific register and memory
 *
 * W5500 register and memory are organized by multiple blocks.  Each one is
 * selected by 16bits offset address and 5bits block select bits.  So we
 * encode it into 32bits address. (lower 16bits is offset address and
 * upper 16bits is block select bits)
 */
#define W5500_SIMR		0x0018 /* Socket Interrupt Mask Register */
#define W5500_RTR		0x0019 /* Retry Time-value Register */

#define W5500_S0_REGS		0x10000

#define W5500_Sn_RXMEM_SIZE(n)	\
		(0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
#define W5500_Sn_TXMEM_SIZE(n)	\
		(0x1001f + (n) * 0x40000) /* Sn TX Memory Size */

#define W5500_TX_MEM_START	0x20000
#define W5500_TX_MEM_SIZE	0x04000
#define W5500_RX_MEM_START	0x30000
#define W5500_RX_MEM_SIZE	0x04000
144 * Device driver private data structure
148 const struct w5100_ops *ops;
150 /* Socket 0 register offset address */
152 /* Socket 0 TX buffer offset address and size */
155 /* Socket 0 RX buffer offset address and size */
163 struct napi_struct napi;
164 struct net_device *ndev;
168 struct workqueue_struct *xfer_wq;
169 struct work_struct rx_work;
170 struct sk_buff *tx_skb;
171 struct work_struct tx_work;
172 struct work_struct setrx_work;
173 struct work_struct restart_work;
176 /************************************************************************
178 * Lowlevel I/O functions
180 ***********************************************************************/
182 struct w5100_mmio_priv {
184 /* Serialize access in indirect address mode */
188 static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
190 return w5100_ops_priv(dev);
193 static inline void __iomem *w5100_mmio(struct net_device *ndev)
195 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
197 return mmio_priv->base;
201 * In direct address mode host system can directly access W5100 registers
202 * after mapping to Memory-Mapped I/O space.
204 * 0x8000 bytes are required for memory space.
206 static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
208 return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
211 static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
214 iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
219 static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
221 __w5100_write_direct(ndev, addr, data);
226 static int w5100_read16_direct(struct net_device *ndev, u32 addr)
229 data = w5100_read_direct(ndev, addr) << 8;
230 data |= w5100_read_direct(ndev, addr + 1);
234 static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
236 __w5100_write_direct(ndev, addr, data >> 8);
237 __w5100_write_direct(ndev, addr + 1, data);
242 static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
247 for (i = 0; i < len; i++, addr++)
248 *buf++ = w5100_read_direct(ndev, addr);
253 static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
254 const u8 *buf, int len)
258 for (i = 0; i < len; i++, addr++)
259 __w5100_write_direct(ndev, addr, *buf++);
264 static int w5100_mmio_init(struct net_device *ndev)
266 struct platform_device *pdev = to_platform_device(ndev->dev.parent);
267 struct w5100_priv *priv = netdev_priv(ndev);
268 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
269 struct resource *mem;
271 spin_lock_init(&mmio_priv->reg_lock);
273 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
274 mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
275 if (IS_ERR(mmio_priv->base))
276 return PTR_ERR(mmio_priv->base);
278 netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
283 static const struct w5100_ops w5100_mmio_direct_ops = {
285 .read = w5100_read_direct,
286 .write = w5100_write_direct,
287 .read16 = w5100_read16_direct,
288 .write16 = w5100_write16_direct,
289 .readbulk = w5100_readbulk_direct,
290 .writebulk = w5100_writebulk_direct,
291 .init = w5100_mmio_init,
/*
 * In indirect address mode host system indirectly accesses registers by
 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
 * Mode Register (MR) is directly accessible.
 *
 * Only 0x04 bytes are required for memory space.
 */
#define W5100_IDM_AR		0x01 /* Indirect Mode Address Register */
#define W5100_IDM_DR		0x03 /* Indirect Mode Data Register */
305 static int w5100_read_indirect(struct net_device *ndev, u32 addr)
307 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
311 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
312 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
313 data = w5100_read_direct(ndev, W5100_IDM_DR);
314 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
319 static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
321 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
324 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
325 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
326 w5100_write_direct(ndev, W5100_IDM_DR, data);
327 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
332 static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
334 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
338 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
339 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
340 data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
341 data |= w5100_read_direct(ndev, W5100_IDM_DR);
342 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
347 static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
349 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
352 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
353 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
354 __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
355 w5100_write_direct(ndev, W5100_IDM_DR, data);
356 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
361 static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
364 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
368 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
369 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
371 for (i = 0; i < len; i++)
372 *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
374 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
379 static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
380 const u8 *buf, int len)
382 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
386 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
387 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
389 for (i = 0; i < len; i++)
390 __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
392 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
397 static int w5100_reset_indirect(struct net_device *ndev)
399 w5100_write_direct(ndev, W5100_MR, MR_RST);
401 w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
406 static const struct w5100_ops w5100_mmio_indirect_ops = {
408 .read = w5100_read_indirect,
409 .write = w5100_write_indirect,
410 .read16 = w5100_read16_indirect,
411 .write16 = w5100_write16_indirect,
412 .readbulk = w5100_readbulk_indirect,
413 .writebulk = w5100_writebulk_indirect,
414 .init = w5100_mmio_init,
415 .reset = w5100_reset_indirect,
418 #if defined(CONFIG_WIZNET_BUS_DIRECT)
420 static int w5100_read(struct w5100_priv *priv, u32 addr)
422 return w5100_read_direct(priv->ndev, addr);
425 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
427 return w5100_write_direct(priv->ndev, addr, data);
430 static int w5100_read16(struct w5100_priv *priv, u32 addr)
432 return w5100_read16_direct(priv->ndev, addr);
435 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
437 return w5100_write16_direct(priv->ndev, addr, data);
440 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
442 return w5100_readbulk_direct(priv->ndev, addr, buf, len);
445 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
448 return w5100_writebulk_direct(priv->ndev, addr, buf, len);
451 #elif defined(CONFIG_WIZNET_BUS_INDIRECT)
453 static int w5100_read(struct w5100_priv *priv, u32 addr)
455 return w5100_read_indirect(priv->ndev, addr);
458 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
460 return w5100_write_indirect(priv->ndev, addr, data);
463 static int w5100_read16(struct w5100_priv *priv, u32 addr)
465 return w5100_read16_indirect(priv->ndev, addr);
468 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
470 return w5100_write16_indirect(priv->ndev, addr, data);
473 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
475 return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
478 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
481 return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
484 #else /* CONFIG_WIZNET_BUS_ANY */
486 static int w5100_read(struct w5100_priv *priv, u32 addr)
488 return priv->ops->read(priv->ndev, addr);
491 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
493 return priv->ops->write(priv->ndev, addr, data);
496 static int w5100_read16(struct w5100_priv *priv, u32 addr)
498 return priv->ops->read16(priv->ndev, addr);
501 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
503 return priv->ops->write16(priv->ndev, addr, data);
506 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
508 return priv->ops->readbulk(priv->ndev, addr, buf, len);
511 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
514 return priv->ops->writebulk(priv->ndev, addr, buf, len);
519 static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
524 const u32 mem_start = priv->s0_rx_buf;
525 const u16 mem_size = priv->s0_rx_buf_size;
528 addr = mem_start + offset;
530 if (offset + len > mem_size) {
531 remain = (offset + len) % mem_size;
532 len = mem_size - offset;
535 ret = w5100_readbulk(priv, addr, buf, len);
539 return w5100_readbulk(priv, mem_start, buf + len, remain);
542 static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
548 const u32 mem_start = priv->s0_tx_buf;
549 const u16 mem_size = priv->s0_tx_buf_size;
552 addr = mem_start + offset;
554 if (offset + len > mem_size) {
555 remain = (offset + len) % mem_size;
556 len = mem_size - offset;
559 ret = w5100_writebulk(priv, addr, buf, len);
563 return w5100_writebulk(priv, mem_start, buf + len, remain);
566 static int w5100_reset(struct w5100_priv *priv)
568 if (priv->ops->reset)
569 return priv->ops->reset(priv->ndev);
571 w5100_write(priv, W5100_MR, MR_RST);
573 w5100_write(priv, W5100_MR, MR_PB);
578 static int w5100_command(struct w5100_priv *priv, u16 cmd)
580 unsigned long timeout;
582 w5100_write(priv, W5100_S0_CR(priv), cmd);
584 timeout = jiffies + msecs_to_jiffies(100);
586 while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
587 if (time_after(jiffies, timeout))
595 static void w5100_write_macaddr(struct w5100_priv *priv)
597 struct net_device *ndev = priv->ndev;
599 w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
602 static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
606 if (priv->ops->chip_id == W5500)
611 w5100_write(priv, imr, mask);
614 static void w5100_enable_intr(struct w5100_priv *priv)
616 w5100_socket_intr_mask(priv, IR_S0);
619 static void w5100_disable_intr(struct w5100_priv *priv)
621 w5100_socket_intr_mask(priv, 0);
624 static void w5100_memory_configure(struct w5100_priv *priv)
626 /* Configure 16K of internal memory
627 * as 8K RX buffer and 8K TX buffer
629 w5100_write(priv, W5100_RMSR, 0x03);
630 w5100_write(priv, W5100_TMSR, 0x03);
633 static void w5200_memory_configure(struct w5100_priv *priv)
637 /* Configure internal RX memory as 16K RX buffer and
638 * internal TX memory as 16K TX buffer
640 w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
641 w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
643 for (i = 1; i < 8; i++) {
644 w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
645 w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
649 static void w5500_memory_configure(struct w5100_priv *priv)
653 /* Configure internal RX memory as 16K RX buffer and
654 * internal TX memory as 16K TX buffer
656 w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
657 w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
659 for (i = 1; i < 8; i++) {
660 w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
661 w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
665 static int w5100_hw_reset(struct w5100_priv *priv)
671 w5100_disable_intr(priv);
672 w5100_write_macaddr(priv);
674 switch (priv->ops->chip_id) {
676 w5100_memory_configure(priv);
680 w5200_memory_configure(priv);
684 w5500_memory_configure(priv);
691 if (w5100_read16(priv, rtr) != RTR_DEFAULT)
697 static void w5100_hw_start(struct w5100_priv *priv)
699 u8 mode = S0_MR_MACRAW;
701 if (!priv->promisc) {
702 if (priv->ops->chip_id == W5500)
703 mode |= W5500_S0_MR_MF;
708 w5100_write(priv, W5100_S0_MR(priv), mode);
709 w5100_command(priv, S0_CR_OPEN);
710 w5100_enable_intr(priv);
713 static void w5100_hw_close(struct w5100_priv *priv)
715 w5100_disable_intr(priv);
716 w5100_command(priv, S0_CR_CLOSE);
719 /***********************************************************************
721 * Device driver functions / callbacks
723 ***********************************************************************/
725 static void w5100_get_drvinfo(struct net_device *ndev,
726 struct ethtool_drvinfo *info)
728 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
729 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
730 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
731 sizeof(info->bus_info));
734 static u32 w5100_get_link(struct net_device *ndev)
736 struct w5100_priv *priv = netdev_priv(ndev);
738 if (gpio_is_valid(priv->link_gpio))
739 return !!gpio_get_value(priv->link_gpio);
744 static u32 w5100_get_msglevel(struct net_device *ndev)
746 struct w5100_priv *priv = netdev_priv(ndev);
748 return priv->msg_enable;
751 static void w5100_set_msglevel(struct net_device *ndev, u32 value)
753 struct w5100_priv *priv = netdev_priv(ndev);
755 priv->msg_enable = value;
758 static int w5100_get_regs_len(struct net_device *ndev)
760 return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
763 static void w5100_get_regs(struct net_device *ndev,
764 struct ethtool_regs *regs, void *buf)
766 struct w5100_priv *priv = netdev_priv(ndev);
769 w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
770 buf += W5100_COMMON_REGS_LEN;
771 w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
774 static void w5100_restart(struct net_device *ndev)
776 struct w5100_priv *priv = netdev_priv(ndev);
778 netif_stop_queue(ndev);
779 w5100_hw_reset(priv);
780 w5100_hw_start(priv);
781 ndev->stats.tx_errors++;
782 netif_trans_update(ndev);
783 netif_wake_queue(ndev);
786 static void w5100_restart_work(struct work_struct *work)
788 struct w5100_priv *priv = container_of(work, struct w5100_priv,
791 w5100_restart(priv->ndev);
794 static void w5100_tx_timeout(struct net_device *ndev)
796 struct w5100_priv *priv = netdev_priv(ndev);
798 if (priv->ops->may_sleep)
799 schedule_work(&priv->restart_work);
804 static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
806 struct w5100_priv *priv = netdev_priv(ndev);
809 offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
810 w5100_writebuf(priv, offset, skb->data, skb->len);
811 w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
812 ndev->stats.tx_bytes += skb->len;
813 ndev->stats.tx_packets++;
816 w5100_command(priv, S0_CR_SEND);
819 static void w5100_tx_work(struct work_struct *work)
821 struct w5100_priv *priv = container_of(work, struct w5100_priv,
823 struct sk_buff *skb = priv->tx_skb;
829 w5100_tx_skb(priv->ndev, skb);
832 static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
834 struct w5100_priv *priv = netdev_priv(ndev);
836 netif_stop_queue(ndev);
838 if (priv->ops->may_sleep) {
839 WARN_ON(priv->tx_skb);
841 queue_work(priv->xfer_wq, &priv->tx_work);
843 w5100_tx_skb(ndev, skb);
849 static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
851 struct w5100_priv *priv = netdev_priv(ndev);
856 u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
861 offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
862 w5100_readbuf(priv, offset, header, 2);
863 rx_len = get_unaligned_be16(header) - 2;
865 skb = netdev_alloc_skb_ip_align(ndev, rx_len);
866 if (unlikely(!skb)) {
867 w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
868 w5100_command(priv, S0_CR_RECV);
869 ndev->stats.rx_dropped++;
873 skb_put(skb, rx_len);
874 w5100_readbuf(priv, offset + 2, skb->data, rx_len);
875 w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
876 w5100_command(priv, S0_CR_RECV);
877 skb->protocol = eth_type_trans(skb, ndev);
879 ndev->stats.rx_packets++;
880 ndev->stats.rx_bytes += rx_len;
885 static void w5100_rx_work(struct work_struct *work)
887 struct w5100_priv *priv = container_of(work, struct w5100_priv,
891 while ((skb = w5100_rx_skb(priv->ndev)))
894 w5100_enable_intr(priv);
897 static int w5100_napi_poll(struct napi_struct *napi, int budget)
899 struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
902 for (rx_count = 0; rx_count < budget; rx_count++) {
903 struct sk_buff *skb = w5100_rx_skb(priv->ndev);
906 netif_receive_skb(skb);
911 if (rx_count < budget) {
912 napi_complete_done(napi, rx_count);
913 w5100_enable_intr(priv);
919 static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
921 struct net_device *ndev = ndev_instance;
922 struct w5100_priv *priv = netdev_priv(ndev);
924 int ir = w5100_read(priv, W5100_S0_IR(priv));
927 w5100_write(priv, W5100_S0_IR(priv), ir);
929 if (ir & S0_IR_SENDOK) {
930 netif_dbg(priv, tx_done, ndev, "tx done\n");
931 netif_wake_queue(ndev);
934 if (ir & S0_IR_RECV) {
935 w5100_disable_intr(priv);
937 if (priv->ops->may_sleep)
938 queue_work(priv->xfer_wq, &priv->rx_work);
939 else if (napi_schedule_prep(&priv->napi))
940 __napi_schedule(&priv->napi);
946 static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
948 struct net_device *ndev = ndev_instance;
949 struct w5100_priv *priv = netdev_priv(ndev);
951 if (netif_running(ndev)) {
952 if (gpio_get_value(priv->link_gpio) != 0) {
953 netif_info(priv, link, ndev, "link is up\n");
954 netif_carrier_on(ndev);
956 netif_info(priv, link, ndev, "link is down\n");
957 netif_carrier_off(ndev);
964 static void w5100_setrx_work(struct work_struct *work)
966 struct w5100_priv *priv = container_of(work, struct w5100_priv,
969 w5100_hw_start(priv);
972 static void w5100_set_rx_mode(struct net_device *ndev)
974 struct w5100_priv *priv = netdev_priv(ndev);
975 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
977 if (priv->promisc != set_promisc) {
978 priv->promisc = set_promisc;
980 if (priv->ops->may_sleep)
981 schedule_work(&priv->setrx_work);
983 w5100_hw_start(priv);
987 static int w5100_set_macaddr(struct net_device *ndev, void *addr)
989 struct w5100_priv *priv = netdev_priv(ndev);
990 struct sockaddr *sock_addr = addr;
992 if (!is_valid_ether_addr(sock_addr->sa_data))
993 return -EADDRNOTAVAIL;
994 memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
995 w5100_write_macaddr(priv);
999 static int w5100_open(struct net_device *ndev)
1001 struct w5100_priv *priv = netdev_priv(ndev);
1003 netif_info(priv, ifup, ndev, "enabling\n");
1004 w5100_hw_start(priv);
1005 napi_enable(&priv->napi);
1006 netif_start_queue(ndev);
1007 if (!gpio_is_valid(priv->link_gpio) ||
1008 gpio_get_value(priv->link_gpio) != 0)
1009 netif_carrier_on(ndev);
1013 static int w5100_stop(struct net_device *ndev)
1015 struct w5100_priv *priv = netdev_priv(ndev);
1017 netif_info(priv, ifdown, ndev, "shutting down\n");
1018 w5100_hw_close(priv);
1019 netif_carrier_off(ndev);
1020 netif_stop_queue(ndev);
1021 napi_disable(&priv->napi);
1025 static const struct ethtool_ops w5100_ethtool_ops = {
1026 .get_drvinfo = w5100_get_drvinfo,
1027 .get_msglevel = w5100_get_msglevel,
1028 .set_msglevel = w5100_set_msglevel,
1029 .get_link = w5100_get_link,
1030 .get_regs_len = w5100_get_regs_len,
1031 .get_regs = w5100_get_regs,
1034 static const struct net_device_ops w5100_netdev_ops = {
1035 .ndo_open = w5100_open,
1036 .ndo_stop = w5100_stop,
1037 .ndo_start_xmit = w5100_start_tx,
1038 .ndo_tx_timeout = w5100_tx_timeout,
1039 .ndo_set_rx_mode = w5100_set_rx_mode,
1040 .ndo_set_mac_address = w5100_set_macaddr,
1041 .ndo_validate_addr = eth_validate_addr,
1044 static int w5100_mmio_probe(struct platform_device *pdev)
1046 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
1047 const void *mac_addr = NULL;
1048 struct resource *mem;
1049 const struct w5100_ops *ops;
1052 if (data && is_valid_ether_addr(data->mac_addr))
1053 mac_addr = data->mac_addr;
1055 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1056 if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
1057 ops = &w5100_mmio_indirect_ops;
1059 ops = &w5100_mmio_direct_ops;
1061 irq = platform_get_irq(pdev, 0);
1065 return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
1066 mac_addr, irq, data ? data->link_gpio : -EINVAL);
1069 static int w5100_mmio_remove(struct platform_device *pdev)
1071 return w5100_remove(&pdev->dev);
1074 void *w5100_ops_priv(const struct net_device *ndev)
1076 return netdev_priv(ndev) +
1077 ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
1079 EXPORT_SYMBOL_GPL(w5100_ops_priv);
1081 int w5100_probe(struct device *dev, const struct w5100_ops *ops,
1082 int sizeof_ops_priv, const void *mac_addr, int irq,
1085 struct w5100_priv *priv;
1086 struct net_device *ndev;
1090 alloc_size = sizeof(*priv);
1091 if (sizeof_ops_priv) {
1092 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
1093 alloc_size += sizeof_ops_priv;
1095 alloc_size += NETDEV_ALIGN - 1;
1097 ndev = alloc_etherdev(alloc_size);
1100 SET_NETDEV_DEV(ndev, dev);
1101 dev_set_drvdata(dev, ndev);
1102 priv = netdev_priv(ndev);
1104 switch (ops->chip_id) {
1106 priv->s0_regs = W5100_S0_REGS;
1107 priv->s0_tx_buf = W5100_TX_MEM_START;
1108 priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
1109 priv->s0_rx_buf = W5100_RX_MEM_START;
1110 priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
1113 priv->s0_regs = W5200_S0_REGS;
1114 priv->s0_tx_buf = W5200_TX_MEM_START;
1115 priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
1116 priv->s0_rx_buf = W5200_RX_MEM_START;
1117 priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
1120 priv->s0_regs = W5500_S0_REGS;
1121 priv->s0_tx_buf = W5500_TX_MEM_START;
1122 priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
1123 priv->s0_rx_buf = W5500_RX_MEM_START;
1124 priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
1134 priv->link_gpio = link_gpio;
1136 ndev->netdev_ops = &w5100_netdev_ops;
1137 ndev->ethtool_ops = &w5100_ethtool_ops;
1138 netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
1140 /* This chip doesn't support VLAN packets with normal MTU,
1141 * so disable VLAN for this device.
1143 ndev->features |= NETIF_F_VLAN_CHALLENGED;
1145 err = register_netdev(ndev);
1149 priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
1151 if (!priv->xfer_wq) {
1156 INIT_WORK(&priv->rx_work, w5100_rx_work);
1157 INIT_WORK(&priv->tx_work, w5100_tx_work);
1158 INIT_WORK(&priv->setrx_work, w5100_setrx_work);
1159 INIT_WORK(&priv->restart_work, w5100_restart_work);
1162 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
1164 eth_hw_addr_random(ndev);
1166 if (priv->ops->init) {
1167 err = priv->ops->init(priv->ndev);
1172 err = w5100_hw_reset(priv);
1176 if (ops->may_sleep) {
1177 err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
1178 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1179 netdev_name(ndev), ndev);
1181 err = request_irq(priv->irq, w5100_interrupt,
1182 IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
1187 if (gpio_is_valid(priv->link_gpio)) {
1188 char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
1194 snprintf(link_name, 16, "%s-link", netdev_name(ndev));
1195 priv->link_irq = gpio_to_irq(priv->link_gpio);
1196 if (request_any_context_irq(priv->link_irq, w5100_detect_link,
1197 IRQF_TRIGGER_RISING |
1198 IRQF_TRIGGER_FALLING,
1199 link_name, priv->ndev) < 0)
1200 priv->link_gpio = -EINVAL;
1206 free_irq(priv->irq, ndev);
1208 destroy_workqueue(priv->xfer_wq);
1210 unregister_netdev(ndev);
1215 EXPORT_SYMBOL_GPL(w5100_probe);
1217 int w5100_remove(struct device *dev)
1219 struct net_device *ndev = dev_get_drvdata(dev);
1220 struct w5100_priv *priv = netdev_priv(ndev);
1222 w5100_hw_reset(priv);
1223 free_irq(priv->irq, ndev);
1224 if (gpio_is_valid(priv->link_gpio))
1225 free_irq(priv->link_irq, ndev);
1227 flush_work(&priv->setrx_work);
1228 flush_work(&priv->restart_work);
1229 destroy_workqueue(priv->xfer_wq);
1231 unregister_netdev(ndev);
1235 EXPORT_SYMBOL_GPL(w5100_remove);
#ifdef CONFIG_PM_SLEEP
/* Suspend: detach the interface and close the socket while asleep. */
static int w5100_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_device_detach(ndev);

		w5100_hw_close(priv);
	}
	return 0;
}

/* Resume: reinitialize the chip and reattach, restoring carrier from
 * the link GPIO when one is configured.
 */
static int w5100_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		w5100_hw_reset(priv);
		w5100_hw_start(priv);

		netif_device_attach(ndev);
		if (!gpio_is_valid(priv->link_gpio) ||
		    gpio_get_value(priv->link_gpio) != 0)
			netif_carrier_on(ndev);
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
EXPORT_SYMBOL_GPL(w5100_pm_ops);
1273 static struct platform_driver w5100_mmio_driver = {
1276 .pm = &w5100_pm_ops,
1278 .probe = w5100_mmio_probe,
1279 .remove = w5100_mmio_remove,
1281 module_platform_driver(w5100_mmio_driver);