// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/nxp/lpc_eth.c
 *
 * Author: Kevin Wells <kevin.wells@nxp.com>
 *
 * Copyright (C) 2010 NXP Semiconductors
 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
 */
/* Prefix all pr_*() messages with the module name */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/clk.h>
14 #include <linux/crc32.h>
15 #include <linux/etherdevice.h>
16 #include <linux/module.h>
17 #include <linux/of_net.h>
18 #include <linux/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/spinlock.h>
21 #include <linux/soc/nxp/lpc32xx-misc.h>
/* Driver identification */
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"

/* Largest frame (bytes) the MAC DMA buffers are sized for, and the
 * number of RX/TX DMA descriptors (one fixed buffer per descriptor).
 */
#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
#define ENET_TX_DESC 16

/* NAPI poll budget weight */
#define NAPI_WEIGHT 16
/*
 * Ethernet MAC controller Register offsets (added to the mapped
 * register base address passed as 'x')
 */
#define LPC_ENET_MAC1(x)			(x + 0x000)
#define LPC_ENET_MAC2(x)			(x + 0x004)
#define LPC_ENET_IPGT(x)			(x + 0x008)
#define LPC_ENET_IPGR(x)			(x + 0x00C)
#define LPC_ENET_CLRT(x)			(x + 0x010)
#define LPC_ENET_MAXF(x)			(x + 0x014)
#define LPC_ENET_SUPP(x)			(x + 0x018)
#define LPC_ENET_TEST(x)			(x + 0x01C)
#define LPC_ENET_MCFG(x)			(x + 0x020)
#define LPC_ENET_MCMD(x)			(x + 0x024)
#define LPC_ENET_MADR(x)			(x + 0x028)
#define LPC_ENET_MWTD(x)			(x + 0x02C)
#define LPC_ENET_MRDD(x)			(x + 0x030)
#define LPC_ENET_MIND(x)			(x + 0x034)
#define LPC_ENET_SA0(x)				(x + 0x040)
#define LPC_ENET_SA1(x)				(x + 0x044)
#define LPC_ENET_SA2(x)				(x + 0x048)
#define LPC_ENET_COMMAND(x)			(x + 0x100)
#define LPC_ENET_STATUS(x)			(x + 0x104)
#define LPC_ENET_RXDESCRIPTOR(x)		(x + 0x108)
#define LPC_ENET_RXSTATUS(x)			(x + 0x10C)
#define LPC_ENET_RXDESCRIPTORNUMBER(x)		(x + 0x110)
#define LPC_ENET_RXPRODUCEINDEX(x)		(x + 0x114)
#define LPC_ENET_RXCONSUMEINDEX(x)		(x + 0x118)
#define LPC_ENET_TXDESCRIPTOR(x)		(x + 0x11C)
#define LPC_ENET_TXSTATUS(x)			(x + 0x120)
#define LPC_ENET_TXDESCRIPTORNUMBER(x)		(x + 0x124)
#define LPC_ENET_TXPRODUCEINDEX(x)		(x + 0x128)
#define LPC_ENET_TXCONSUMEINDEX(x)		(x + 0x12C)
#define LPC_ENET_TSV0(x)			(x + 0x158)
#define LPC_ENET_TSV1(x)			(x + 0x15C)
#define LPC_ENET_RSV(x)				(x + 0x160)
#define LPC_ENET_FLOWCONTROLCOUNTER(x)		(x + 0x170)
#define LPC_ENET_FLOWCONTROLSTATUS(x)		(x + 0x174)
#define LPC_ENET_RXFILTER_CTRL(x)		(x + 0x200)
#define LPC_ENET_RXFILTERWOLSTATUS(x)		(x + 0x204)
#define LPC_ENET_RXFILTERWOLCLEAR(x)		(x + 0x208)
#define LPC_ENET_HASHFILTERL(x)			(x + 0x210)
#define LPC_ENET_HASHFILTERH(x)			(x + 0x214)
#define LPC_ENET_INTSTATUS(x)			(x + 0xFE0)
#define LPC_ENET_INTENABLE(x)			(x + 0xFE4)
#define LPC_ENET_INTCLEAR(x)			(x + 0xFE8)
#define LPC_ENET_INTSET(x)			(x + 0xFEC)
#define LPC_ENET_POWERDOWN(x)			(x + 0xFF4)
/*
 * mac1 register definitions
 */
#define LPC_MAC1_RECV_ENABLE			(1 << 0)
#define LPC_MAC1_PASS_ALL_RX_FRAMES		(1 << 1)
#define LPC_MAC1_RX_FLOW_CONTROL		(1 << 2)
#define LPC_MAC1_TX_FLOW_CONTROL		(1 << 3)
#define LPC_MAC1_LOOPBACK			(1 << 4)
#define LPC_MAC1_RESET_TX			(1 << 8)
#define LPC_MAC1_RESET_MCS_TX			(1 << 9)
#define LPC_MAC1_RESET_RX			(1 << 10)
#define LPC_MAC1_RESET_MCS_RX			(1 << 11)
#define LPC_MAC1_SIMULATION_RESET		(1 << 14)
#define LPC_MAC1_SOFT_RESET			(1 << 15)

/*
 * mac2 register definitions
 */
#define LPC_MAC2_FULL_DUPLEX			(1 << 0)
#define LPC_MAC2_FRAME_LENGTH_CHECKING		(1 << 1)
#define LPC_MAC2_HUGH_LENGTH_CHECKING		(1 << 2)
#define LPC_MAC2_DELAYED_CRC			(1 << 3)
#define LPC_MAC2_CRC_ENABLE			(1 << 4)
#define LPC_MAC2_PAD_CRC_ENABLE			(1 << 5)
#define LPC_MAC2_VLAN_PAD_ENABLE		(1 << 6)
#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE		(1 << 7)
#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT	(1 << 8)
#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT	(1 << 9)
#define LPC_MAC2_NO_BACKOFF			(1 << 12)
#define LPC_MAC2_BACK_PRESSURE			(1 << 13)
#define LPC_MAC2_EXCESS_DEFER			(1 << 14)

/*
 * ipgt register definitions
 */
#define LPC_IPGT_LOAD(n)			((n) & 0x7F)

/*
 * ipgr register definitions
 */
#define LPC_IPGR_LOAD_PART2(n)			((n) & 0x7F)
#define LPC_IPGR_LOAD_PART1(n)			(((n) & 0x7F) << 8)

/*
 * clrt register definitions
 */
#define LPC_CLRT_LOAD_RETRY_MAX(n)		((n) & 0xF)
#define LPC_CLRT_LOAD_COLLISION_WINDOW(n)	(((n) & 0x3F) << 8)

/*
 * maxf register definitions
 */
#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n)		((n) & 0xFFFF)

/*
 * supp register definitions
 */
#define LPC_SUPP_SPEED				(1 << 8)
#define LPC_SUPP_RESET_RMII			(1 << 11)

/*
 * test register definitions
 */
#define LPC_TEST_SHORTCUT_PAUSE_QUANTA		(1 << 0)
#define LPC_TEST_PAUSE				(1 << 1)
#define LPC_TEST_BACKPRESSURE			(1 << 2)

/*
 * mcfg register definitions
 */
#define LPC_MCFG_SCAN_INCREMENT			(1 << 0)
#define LPC_MCFG_SUPPRESS_PREAMBLE		(1 << 1)
#define LPC_MCFG_CLOCK_SELECT(n)		(((n) & 0x7) << 2)
#define LPC_MCFG_CLOCK_HOST_DIV_4		0
#define LPC_MCFG_CLOCK_HOST_DIV_6		2
#define LPC_MCFG_CLOCK_HOST_DIV_8		3
#define LPC_MCFG_CLOCK_HOST_DIV_10		4
#define LPC_MCFG_CLOCK_HOST_DIV_14		5
#define LPC_MCFG_CLOCK_HOST_DIV_20		6
#define LPC_MCFG_CLOCK_HOST_DIV_28		7
#define LPC_MCFG_RESET_MII_MGMT			(1 << 15)

/*
 * mcmd register definitions
 */
#define LPC_MCMD_READ				(1 << 0)
#define LPC_MCMD_SCAN				(1 << 1)

/*
 * madr register definitions
 */
#define LPC_MADR_REGISTER_ADDRESS(n)		((n) & 0x1F)
#define LPC_MADR_PHY_0ADDRESS(n)		(((n) & 0x1F) << 8)

/*
 * mwtd register definitions
 */
#define LPC_MWDT_WRITE(n)			((n) & 0xFFFF)

/*
 * mrdd register definitions
 */
#define LPC_MRDD_READ_MASK			0xFFFF

/*
 * mind register definitions
 */
#define LPC_MIND_BUSY				(1 << 0)
#define LPC_MIND_SCANNING			(1 << 1)
#define LPC_MIND_NOT_VALID			(1 << 2)
#define LPC_MIND_MII_LINK_FAIL			(1 << 3)
/*
 * command register definitions
 */
#define LPC_COMMAND_RXENABLE			(1 << 0)
#define LPC_COMMAND_TXENABLE			(1 << 1)
#define LPC_COMMAND_REG_RESET			(1 << 3)
#define LPC_COMMAND_TXRESET			(1 << 4)
#define LPC_COMMAND_RXRESET			(1 << 5)
#define LPC_COMMAND_PASSRUNTFRAME		(1 << 6)
#define LPC_COMMAND_PASSRXFILTER		(1 << 7)
#define LPC_COMMAND_TXFLOWCONTROL		(1 << 8)
#define LPC_COMMAND_RMII			(1 << 9)
#define LPC_COMMAND_FULLDUPLEX			(1 << 10)

/*
 * status register definitions
 */
#define LPC_STATUS_RXACTIVE			(1 << 0)
#define LPC_STATUS_TXACTIVE			(1 << 1)

/*
 * tsv0 register definitions
 */
#define LPC_TSV0_CRC_ERROR			(1 << 0)
#define LPC_TSV0_LENGTH_CHECK_ERROR		(1 << 1)
#define LPC_TSV0_LENGTH_OUT_OF_RANGE		(1 << 2)
#define LPC_TSV0_DONE				(1 << 3)
#define LPC_TSV0_MULTICAST			(1 << 4)
#define LPC_TSV0_BROADCAST			(1 << 5)
#define LPC_TSV0_PACKET_DEFER			(1 << 6)
#define LPC_TSV0_ESCESSIVE_DEFER		(1 << 7)
#define LPC_TSV0_ESCESSIVE_COLLISION		(1 << 8)
#define LPC_TSV0_LATE_COLLISION			(1 << 9)
#define LPC_TSV0_GIANT				(1 << 10)
#define LPC_TSV0_UNDERRUN			(1 << 11)
#define LPC_TSV0_TOTAL_BYTES(n)			(((n) >> 12) & 0xFFFF)
#define LPC_TSV0_CONTROL_FRAME			(1 << 28)
#define LPC_TSV0_PAUSE				(1 << 29)
#define LPC_TSV0_BACKPRESSURE			(1 << 30)
#define LPC_TSV0_VLAN				(1 << 31)

/*
 * tsv1 register definitions
 */
#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n)		((n) & 0xFFFF)
#define LPC_TSV1_COLLISION_COUNT(n)		(((n) >> 16) & 0xF)

/*
 * rsv register definitions
 */
#define LPC_RSV_RECEIVED_BYTE_COUNT(n)		((n) & 0xFFFF)
#define LPC_RSV_RXDV_EVENT_IGNORED		(1 << 16)
#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN	(1 << 17)
#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN	(1 << 18)
#define LPC_RSV_RECEIVE_CODE_VIOLATION		(1 << 19)
#define LPC_RSV_CRC_ERROR			(1 << 20)
#define LPC_RSV_LENGTH_CHECK_ERROR		(1 << 21)
#define LPC_RSV_LENGTH_OUT_OF_RANGE		(1 << 22)
#define LPC_RSV_RECEIVE_OK			(1 << 23)
#define LPC_RSV_MULTICAST			(1 << 24)
#define LPC_RSV_BROADCAST			(1 << 25)
#define LPC_RSV_DRIBBLE_NIBBLE			(1 << 26)
#define LPC_RSV_CONTROL_FRAME			(1 << 27)
#define LPC_RSV_PAUSE				(1 << 28)
#define LPC_RSV_UNSUPPORTED_OPCODE		(1 << 29)
#define LPC_RSV_VLAN				(1 << 30)

/*
 * flowcontrolcounter register definitions
 */
#define LPC_FCCR_MIRRORCOUNTER(n)		((n) & 0xFFFF)
#define LPC_FCCR_PAUSETIMER(n)			(((n) >> 16) & 0xFFFF)

/*
 * flowcontrolstatus register definitions
 */
#define LPC_FCCR_MIRRORCOUNTERCURRENT(n)	((n) & 0xFFFF)

/*
 * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
 * register definitions
 */
#define LPC_RXFLTRW_ACCEPTUNICAST		(1 << 0)
#define LPC_RXFLTRW_ACCEPTUBROADCAST		(1 << 1)
#define LPC_RXFLTRW_ACCEPTUMULTICAST		(1 << 2)
#define LPC_RXFLTRW_ACCEPTUNICASTHASH		(1 << 3)
#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH	(1 << 4)
#define LPC_RXFLTRW_ACCEPTPERFECT		(1 << 5)

/*
 * rxfilterctrl register definitions
 */
#define LPC_RXFLTRWSTS_MAGICPACKETENWOL		(1 << 12)
#define LPC_RXFLTRWSTS_RXFILTERENWOL		(1 << 13)

/*
 * rxfilterwolstatus/rxfilterwolclear register definitions
 */
#define LPC_RXFLTRWSTS_RXFILTERWOL		(1 << 7)
#define LPC_RXFLTRWSTS_MAGICPACKETWOL		(1 << 8)

/*
 * intstatus, intenable, intclear, and Intset shared register
 * definitions
 */
#define LPC_MACINT_RXOVERRUNINTEN		(1 << 0)
#define LPC_MACINT_RXERRORONINT			(1 << 1)
#define LPC_MACINT_RXFINISHEDINTEN		(1 << 2)
#define LPC_MACINT_RXDONEINTEN			(1 << 3)
#define LPC_MACINT_TXUNDERRUNINTEN		(1 << 4)
#define LPC_MACINT_TXERRORINTEN			(1 << 5)
#define LPC_MACINT_TXFINISHEDINTEN		(1 << 6)
#define LPC_MACINT_TXDONEINTEN			(1 << 7)
#define LPC_MACINT_SOFTINTEN			(1 << 12)
#define LPC_MACINT_WAKEUPINTEN			(1 << 13)

/*
 * powerdown register definitions
 */
#define LPC_POWERDOWN_MACAHB			(1 << 31)
313 static phy_interface_t lpc_phy_interface_mode(struct device *dev)
315 if (dev && dev->of_node) {
316 const char *mode = of_get_property(dev->of_node,
318 if (mode && !strcmp(mode, "mii"))
319 return PHY_INTERFACE_MODE_MII;
321 return PHY_INTERFACE_MODE_RMII;
324 static bool use_iram_for_net(struct device *dev)
326 if (dev && dev->of_node)
327 return of_property_read_bool(dev->of_node, "use-iram");
/* Receive Status information word */
#define RXSTATUS_SIZE			0x000007FF
#define RXSTATUS_CONTROL		(1 << 18)
#define RXSTATUS_VLAN			(1 << 19)
#define RXSTATUS_FILTER			(1 << 20)
#define RXSTATUS_MULTICAST		(1 << 21)
#define RXSTATUS_BROADCAST		(1 << 22)
#define RXSTATUS_CRC			(1 << 23)
#define RXSTATUS_SYMBOL			(1 << 24)
#define RXSTATUS_LENGTH			(1 << 25)
#define RXSTATUS_RANGE			(1 << 26)
#define RXSTATUS_ALIGN			(1 << 27)
#define RXSTATUS_OVERRUN		(1 << 28)
#define RXSTATUS_NODESC			(1 << 29)
#define RXSTATUS_LAST			(1 << 30)
#define RXSTATUS_ERROR			(1 << 31)

/* Mask of all "real" RX error conditions */
#define RXSTATUS_STATUS_ERROR \
	(RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
	 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
/* Receive Descriptor control word */
#define RXDESC_CONTROL_SIZE		0x000007FF
#define RXDESC_CONTROL_INT		(1 << 31)

/* Transmit Status information word */
#define TXSTATUS_COLLISIONS_GET(x)	(((x) >> 21) & 0xF)
#define TXSTATUS_DEFER			(1 << 25)
#define TXSTATUS_EXCESSDEFER		(1 << 26)
#define TXSTATUS_EXCESSCOLL		(1 << 27)
#define TXSTATUS_LATECOLL		(1 << 28)
#define TXSTATUS_UNDERRUN		(1 << 29)
#define TXSTATUS_NODESC			(1 << 30)
#define TXSTATUS_ERROR			(1 << 31)

/* Transmit Descriptor control word */
#define TXDESC_CONTROL_SIZE		0x000007FF
#define TXDESC_CONTROL_OVERRIDE		(1 << 26)
#define TXDESC_CONTROL_HUGE		(1 << 27)
#define TXDESC_CONTROL_PAD		(1 << 28)
#define TXDESC_CONTROL_CRC		(1 << 29)
#define TXDESC_CONTROL_LAST		(1 << 30)
#define TXDESC_CONTROL_INT		(1 << 31)
376 * Structure of a TX/RX descriptors and RX status
384 __le32 statushashcrc;
388 * Device driver data structure
390 struct netdata_local {
391 struct platform_device *pdev;
392 struct net_device *ndev;
394 void __iomem *net_base;
396 unsigned int skblen[ENET_TX_DESC];
397 unsigned int last_tx_idx;
398 unsigned int num_used_tx_buffs;
399 struct mii_bus *mii_bus;
401 dma_addr_t dma_buff_base_p;
402 void *dma_buff_base_v;
403 size_t dma_buff_size;
404 struct txrx_desc_t *tx_desc_v;
407 struct txrx_desc_t *rx_desc_v;
408 struct rx_status_t *rx_stat_v;
413 struct napi_struct napi;
417 * MAC support functions
419 static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
423 /* Set station address */
424 tmp = mac[0] | ((u32)mac[1] << 8);
425 writel(tmp, LPC_ENET_SA2(pldat->net_base));
426 tmp = mac[2] | ((u32)mac[3] << 8);
427 writel(tmp, LPC_ENET_SA1(pldat->net_base));
428 tmp = mac[4] | ((u32)mac[5] << 8);
429 writel(tmp, LPC_ENET_SA0(pldat->net_base));
431 netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
434 static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
438 /* Get station address */
439 tmp = readl(LPC_ENET_SA2(pldat->net_base));
442 tmp = readl(LPC_ENET_SA1(pldat->net_base));
445 tmp = readl(LPC_ENET_SA0(pldat->net_base));
450 static void __lpc_params_setup(struct netdata_local *pldat)
454 if (pldat->duplex == DUPLEX_FULL) {
455 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
456 tmp |= LPC_MAC2_FULL_DUPLEX;
457 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
458 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
459 tmp |= LPC_COMMAND_FULLDUPLEX;
460 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
461 writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
463 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
464 tmp &= ~LPC_MAC2_FULL_DUPLEX;
465 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
466 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
467 tmp &= ~LPC_COMMAND_FULLDUPLEX;
468 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
469 writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
472 if (pldat->speed == SPEED_100)
473 writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
475 writel(0, LPC_ENET_SUPP(pldat->net_base));
478 static void __lpc_eth_reset(struct netdata_local *pldat)
480 /* Reset all MAC logic */
481 writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
482 LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
483 LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
484 writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
485 LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
488 static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
490 /* Reset MII management hardware */
491 writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));
493 /* Setup MII clock to slowest rate with a /28 divider */
494 writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
495 LPC_ENET_MCFG(pldat->net_base));
500 static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
504 phaddr = addr - pldat->dma_buff_base_v;
505 phaddr += pldat->dma_buff_base_p;
510 static void lpc_eth_enable_int(void __iomem *regbase)
512 writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
513 LPC_ENET_INTENABLE(regbase));
516 static void lpc_eth_disable_int(void __iomem *regbase)
518 writel(0, LPC_ENET_INTENABLE(regbase));
521 /* Setup TX/RX descriptors */
522 static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
527 struct txrx_desc_t *ptxrxdesc;
528 struct rx_status_t *prxstat;
530 tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);
532 /* Setup TX descriptors, status, and buffers */
533 pldat->tx_desc_v = tbuff;
534 tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;
536 pldat->tx_stat_v = tbuff;
537 tbuff += sizeof(u32) * ENET_TX_DESC;
539 tbuff = PTR_ALIGN(tbuff, 16);
540 pldat->tx_buff_v = tbuff;
541 tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;
543 /* Setup RX descriptors, status, and buffers */
544 pldat->rx_desc_v = tbuff;
545 tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;
547 tbuff = PTR_ALIGN(tbuff, 16);
548 pldat->rx_stat_v = tbuff;
549 tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;
551 tbuff = PTR_ALIGN(tbuff, 16);
552 pldat->rx_buff_v = tbuff;
553 tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;
555 /* Map the TX descriptors to the TX buffers in hardware */
556 for (i = 0; i < ENET_TX_DESC; i++) {
557 ptxstat = &pldat->tx_stat_v[i];
558 ptxrxdesc = &pldat->tx_desc_v[i];
560 ptxrxdesc->packet = __va_to_pa(
561 pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
562 ptxrxdesc->control = 0;
566 /* Map the RX descriptors to the RX buffers in hardware */
567 for (i = 0; i < ENET_RX_DESC; i++) {
568 prxstat = &pldat->rx_stat_v[i];
569 ptxrxdesc = &pldat->rx_desc_v[i];
571 ptxrxdesc->packet = __va_to_pa(
572 pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
573 ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
574 prxstat->statusinfo = 0;
575 prxstat->statushashcrc = 0;
578 /* Setup base addresses in hardware to point to buffers and
581 writel((ENET_TX_DESC - 1),
582 LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
583 writel(__va_to_pa(pldat->tx_desc_v, pldat),
584 LPC_ENET_TXDESCRIPTOR(pldat->net_base));
585 writel(__va_to_pa(pldat->tx_stat_v, pldat),
586 LPC_ENET_TXSTATUS(pldat->net_base));
587 writel((ENET_RX_DESC - 1),
588 LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
589 writel(__va_to_pa(pldat->rx_desc_v, pldat),
590 LPC_ENET_RXDESCRIPTOR(pldat->net_base));
591 writel(__va_to_pa(pldat->rx_stat_v, pldat),
592 LPC_ENET_RXSTATUS(pldat->net_base));
595 static void __lpc_eth_init(struct netdata_local *pldat)
599 /* Disable controller and reset */
600 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
601 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
602 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
603 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
604 tmp &= ~LPC_MAC1_RECV_ENABLE;
605 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
607 /* Initial MAC setup */
608 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
609 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
610 LPC_ENET_MAC2(pldat->net_base));
611 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
613 /* Collision window, gap */
614 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
615 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
616 LPC_ENET_CLRT(pldat->net_base));
617 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
619 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
620 writel(LPC_COMMAND_PASSRUNTFRAME,
621 LPC_ENET_COMMAND(pldat->net_base));
623 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
624 LPC_ENET_COMMAND(pldat->net_base));
625 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
628 __lpc_params_setup(pldat);
630 /* Setup TX and RX descriptors */
631 __lpc_txrx_desc_setup(pldat);
633 /* Setup packet filtering */
634 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
635 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
637 /* Get the next TX buffer output index */
638 pldat->num_used_tx_buffs = 0;
640 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
642 /* Clear and enable interrupts */
643 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
645 lpc_eth_enable_int(pldat->net_base);
647 /* Enable controller */
648 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
649 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
650 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
651 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
652 tmp |= LPC_MAC1_RECV_ENABLE;
653 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
656 static void __lpc_eth_shutdown(struct netdata_local *pldat)
658 /* Reset ethernet and power down PHY */
659 __lpc_eth_reset(pldat);
660 writel(0, LPC_ENET_MAC1(pldat->net_base));
661 writel(0, LPC_ENET_MAC2(pldat->net_base));
665 * MAC<--->PHY support functions
667 static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
669 struct netdata_local *pldat = bus->priv;
670 unsigned long timeout = jiffies + msecs_to_jiffies(100);
673 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
674 writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
676 /* Wait for unbusy status */
677 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
678 if (time_after(jiffies, timeout))
683 lps = readl(LPC_ENET_MRDD(pldat->net_base));
684 writel(0, LPC_ENET_MCMD(pldat->net_base));
689 static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
692 struct netdata_local *pldat = bus->priv;
693 unsigned long timeout = jiffies + msecs_to_jiffies(100);
695 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
696 writel(phydata, LPC_ENET_MWTD(pldat->net_base));
698 /* Wait for completion */
699 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
700 if (time_after(jiffies, timeout))
708 static int lpc_mdio_reset(struct mii_bus *bus)
710 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
713 static void lpc_handle_link_change(struct net_device *ndev)
715 struct netdata_local *pldat = netdev_priv(ndev);
716 struct phy_device *phydev = ndev->phydev;
719 bool status_change = false;
721 spin_lock_irqsave(&pldat->lock, flags);
724 if ((pldat->speed != phydev->speed) ||
725 (pldat->duplex != phydev->duplex)) {
726 pldat->speed = phydev->speed;
727 pldat->duplex = phydev->duplex;
728 status_change = true;
732 if (phydev->link != pldat->link) {
737 pldat->link = phydev->link;
739 status_change = true;
742 spin_unlock_irqrestore(&pldat->lock, flags);
745 __lpc_params_setup(pldat);
748 static int lpc_mii_probe(struct net_device *ndev)
750 struct netdata_local *pldat = netdev_priv(ndev);
751 struct phy_device *phydev = phy_find_first(pldat->mii_bus);
754 netdev_err(ndev, "no PHY found\n");
758 /* Attach to the PHY */
759 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
760 netdev_info(ndev, "using MII interface\n");
762 netdev_info(ndev, "using RMII interface\n");
763 phydev = phy_connect(ndev, phydev_name(phydev),
764 &lpc_handle_link_change,
765 lpc_phy_interface_mode(&pldat->pdev->dev));
767 if (IS_ERR(phydev)) {
768 netdev_err(ndev, "Could not attach to PHY\n");
769 return PTR_ERR(phydev);
772 phy_set_max_speed(phydev, SPEED_100);
778 phy_attached_info(phydev);
783 static int lpc_mii_init(struct netdata_local *pldat)
787 pldat->mii_bus = mdiobus_alloc();
788 if (!pldat->mii_bus) {
794 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
795 writel(LPC_COMMAND_PASSRUNTFRAME,
796 LPC_ENET_COMMAND(pldat->net_base));
798 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
799 LPC_ENET_COMMAND(pldat->net_base));
800 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
803 pldat->mii_bus->name = "lpc_mii_bus";
804 pldat->mii_bus->read = &lpc_mdio_read;
805 pldat->mii_bus->write = &lpc_mdio_write;
806 pldat->mii_bus->reset = &lpc_mdio_reset;
807 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
808 pldat->pdev->name, pldat->pdev->id);
809 pldat->mii_bus->priv = pldat;
810 pldat->mii_bus->parent = &pldat->pdev->dev;
812 platform_set_drvdata(pldat->pdev, pldat->mii_bus);
814 if (mdiobus_register(pldat->mii_bus))
815 goto err_out_unregister_bus;
817 if (lpc_mii_probe(pldat->ndev) != 0)
818 goto err_out_unregister_bus;
822 err_out_unregister_bus:
823 mdiobus_unregister(pldat->mii_bus);
824 mdiobus_free(pldat->mii_bus);
829 static void __lpc_handle_xmit(struct net_device *ndev)
831 struct netdata_local *pldat = netdev_priv(ndev);
832 u32 txcidx, *ptxstat, txstat;
834 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
835 while (pldat->last_tx_idx != txcidx) {
836 unsigned int skblen = pldat->skblen[pldat->last_tx_idx];
838 /* A buffer is available, get buffer status */
839 ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
842 /* Next buffer and decrement used buffer counter */
843 pldat->num_used_tx_buffs--;
844 pldat->last_tx_idx++;
845 if (pldat->last_tx_idx >= ENET_TX_DESC)
846 pldat->last_tx_idx = 0;
848 /* Update collision counter */
849 ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);
851 /* Any errors occurred? */
852 if (txstat & TXSTATUS_ERROR) {
853 if (txstat & TXSTATUS_UNDERRUN) {
855 ndev->stats.tx_fifo_errors++;
857 if (txstat & TXSTATUS_LATECOLL) {
859 ndev->stats.tx_aborted_errors++;
861 if (txstat & TXSTATUS_EXCESSCOLL) {
862 /* Excessive collision */
863 ndev->stats.tx_aborted_errors++;
865 if (txstat & TXSTATUS_EXCESSDEFER) {
867 ndev->stats.tx_aborted_errors++;
869 ndev->stats.tx_errors++;
872 ndev->stats.tx_packets++;
873 ndev->stats.tx_bytes += skblen;
876 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
879 if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
880 if (netif_queue_stopped(ndev))
881 netif_wake_queue(ndev);
885 static int __lpc_handle_recv(struct net_device *ndev, int budget)
887 struct netdata_local *pldat = netdev_priv(ndev);
889 u32 rxconsidx, len, ethst;
890 struct rx_status_t *prxstat;
893 /* Get the current RX buffer indexes */
894 rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
895 while (rx_done < budget && rxconsidx !=
896 readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
897 /* Get pointer to receive status */
898 prxstat = &pldat->rx_stat_v[rxconsidx];
899 len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;
902 ethst = prxstat->statusinfo;
903 if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
904 (RXSTATUS_ERROR | RXSTATUS_RANGE))
905 ethst &= ~RXSTATUS_ERROR;
907 if (ethst & RXSTATUS_ERROR) {
908 int si = prxstat->statusinfo;
910 if (si & RXSTATUS_OVERRUN) {
912 ndev->stats.rx_fifo_errors++;
913 } else if (si & RXSTATUS_CRC) {
915 ndev->stats.rx_crc_errors++;
916 } else if (si & RXSTATUS_LENGTH) {
918 ndev->stats.rx_length_errors++;
919 } else if (si & RXSTATUS_ERROR) {
921 ndev->stats.rx_length_errors++;
923 ndev->stats.rx_errors++;
926 skb = dev_alloc_skb(len);
928 ndev->stats.rx_dropped++;
930 /* Copy packet from buffer */
932 pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
935 /* Pass to upper layer */
936 skb->protocol = eth_type_trans(skb, ndev);
937 netif_receive_skb(skb);
938 ndev->stats.rx_packets++;
939 ndev->stats.rx_bytes += len;
943 /* Increment consume index */
944 rxconsidx = rxconsidx + 1;
945 if (rxconsidx >= ENET_RX_DESC)
948 LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
955 static int lpc_eth_poll(struct napi_struct *napi, int budget)
957 struct netdata_local *pldat = container_of(napi,
958 struct netdata_local, napi);
959 struct net_device *ndev = pldat->ndev;
961 struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);
963 __netif_tx_lock(txq, smp_processor_id());
964 __lpc_handle_xmit(ndev);
965 __netif_tx_unlock(txq);
966 rx_done = __lpc_handle_recv(ndev, budget);
968 if (rx_done < budget) {
969 napi_complete_done(napi, rx_done);
970 lpc_eth_enable_int(pldat->net_base);
976 static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
978 struct net_device *ndev = dev_id;
979 struct netdata_local *pldat = netdev_priv(ndev);
982 spin_lock(&pldat->lock);
984 tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
985 /* Clear interrupts */
986 writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));
988 lpc_eth_disable_int(pldat->net_base);
989 if (likely(napi_schedule_prep(&pldat->napi)))
990 __napi_schedule(&pldat->napi);
992 spin_unlock(&pldat->lock);
997 static int lpc_eth_close(struct net_device *ndev)
1000 struct netdata_local *pldat = netdev_priv(ndev);
1002 if (netif_msg_ifdown(pldat))
1003 dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);
1005 napi_disable(&pldat->napi);
1006 netif_stop_queue(ndev);
1009 phy_stop(ndev->phydev);
1011 spin_lock_irqsave(&pldat->lock, flags);
1012 __lpc_eth_reset(pldat);
1013 netif_carrier_off(ndev);
1014 writel(0, LPC_ENET_MAC1(pldat->net_base));
1015 writel(0, LPC_ENET_MAC2(pldat->net_base));
1016 spin_unlock_irqrestore(&pldat->lock, flags);
1018 clk_disable_unprepare(pldat->clk);
1023 static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1025 struct netdata_local *pldat = netdev_priv(ndev);
1028 struct txrx_desc_t *ptxrxdesc;
1032 spin_lock_irq(&pldat->lock);
1034 if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
1035 /* This function should never be called when there are no
1037 netif_stop_queue(ndev);
1038 spin_unlock_irq(&pldat->lock);
1039 WARN(1, "BUG! TX request when no free TX buffers!\n");
1040 return NETDEV_TX_BUSY;
1043 /* Get the next TX descriptor index */
1044 txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
1046 /* Setup control for the transfer */
1047 ptxstat = &pldat->tx_stat_v[txidx];
1049 ptxrxdesc = &pldat->tx_desc_v[txidx];
1050 ptxrxdesc->control =
1051 (len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;
1053 /* Copy data to the DMA buffer */
1054 memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
1056 /* Save the buffer and increment the buffer counter */
1057 pldat->skblen[txidx] = len;
1058 pldat->num_used_tx_buffs++;
1060 /* Start transmit */
1062 if (txidx >= ENET_TX_DESC)
1064 writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));
1066 /* Stop queue if no more TX buffers */
1067 if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
1068 netif_stop_queue(ndev);
1070 spin_unlock_irq(&pldat->lock);
1073 return NETDEV_TX_OK;
1076 static int lpc_set_mac_address(struct net_device *ndev, void *p)
1078 struct sockaddr *addr = p;
1079 struct netdata_local *pldat = netdev_priv(ndev);
1080 unsigned long flags;
1082 if (!is_valid_ether_addr(addr->sa_data))
1083 return -EADDRNOTAVAIL;
1084 memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1086 spin_lock_irqsave(&pldat->lock, flags);
1088 /* Set station address */
1089 __lpc_set_mac(pldat, ndev->dev_addr);
1091 spin_unlock_irqrestore(&pldat->lock, flags);
1096 static void lpc_eth_set_multicast_list(struct net_device *ndev)
1098 struct netdata_local *pldat = netdev_priv(ndev);
1099 struct netdev_hw_addr_list *mcptr = &ndev->mc;
1100 struct netdev_hw_addr *ha;
1101 u32 tmp32, hash_val, hashlo, hashhi;
1102 unsigned long flags;
1104 spin_lock_irqsave(&pldat->lock, flags);
1106 /* Set station address */
1107 __lpc_set_mac(pldat, ndev->dev_addr);
1109 tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
1111 if (ndev->flags & IFF_PROMISC)
1112 tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
1113 LPC_RXFLTRW_ACCEPTUMULTICAST;
1114 if (ndev->flags & IFF_ALLMULTI)
1115 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
1117 if (netdev_hw_addr_list_count(mcptr))
1118 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
1120 writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
1123 /* Set initial hash table */
1127 /* 64 bits : multicast address in hash table */
1128 netdev_hw_addr_list_for_each(ha, mcptr) {
1129 hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
1132 hashhi |= 1 << (hash_val - 32);
1134 hashlo |= 1 << hash_val;
1137 writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
1138 writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
1140 spin_unlock_irqrestore(&pldat->lock, flags);
1143 static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1145 struct phy_device *phydev = ndev->phydev;
1147 if (!netif_running(ndev))
1153 return phy_mii_ioctl(phydev, req, cmd);
1156 static int lpc_eth_open(struct net_device *ndev)
1158 struct netdata_local *pldat = netdev_priv(ndev);
1161 if (netif_msg_ifup(pldat))
1162 dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
1164 ret = clk_prepare_enable(pldat->clk);
1168 /* Suspended PHY makes LPC ethernet core block, so resume now */
1169 phy_resume(ndev->phydev);
1171 /* Reset and initialize */
1172 __lpc_eth_reset(pldat);
1173 __lpc_eth_init(pldat);
1175 /* schedule a link state check */
1176 phy_start(ndev->phydev);
1177 netif_start_queue(ndev);
1178 napi_enable(&pldat->napi);
1186 static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
1187 struct ethtool_drvinfo *info)
1189 strlcpy(info->driver, MODNAME, sizeof(info->driver));
1190 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1191 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
1192 sizeof(info->bus_info));
1195 static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1197 struct netdata_local *pldat = netdev_priv(ndev);
1199 return pldat->msg_enable;
1202 static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
1204 struct netdata_local *pldat = netdev_priv(ndev);
1206 pldat->msg_enable = level;
1209 static const struct ethtool_ops lpc_eth_ethtool_ops = {
1210 .get_drvinfo = lpc_eth_ethtool_getdrvinfo,
1211 .get_msglevel = lpc_eth_ethtool_getmsglevel,
1212 .set_msglevel = lpc_eth_ethtool_setmsglevel,
1213 .get_link = ethtool_op_get_link,
1214 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1215 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1218 static const struct net_device_ops lpc_netdev_ops = {
1219 .ndo_open = lpc_eth_open,
1220 .ndo_stop = lpc_eth_close,
1221 .ndo_start_xmit = lpc_eth_hard_start_xmit,
1222 .ndo_set_rx_mode = lpc_eth_set_multicast_list,
1223 .ndo_do_ioctl = lpc_eth_ioctl,
1224 .ndo_set_mac_address = lpc_set_mac_address,
1225 .ndo_validate_addr = eth_validate_addr,
1228 static int lpc_eth_drv_probe(struct platform_device *pdev)
1230 struct device *dev = &pdev->dev;
1231 struct device_node *np = dev->of_node;
1232 struct netdata_local *pldat;
1233 struct net_device *ndev;
1234 dma_addr_t dma_handle;
1235 struct resource *res;
1238 /* Setup network interface for RMII or MII mode */
1239 lpc32xx_set_phy_interface_mode(lpc_phy_interface_mode(dev));
1241 /* Get platform resources */
1242 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1243 irq = platform_get_irq(pdev, 0);
1244 if (!res || irq < 0) {
1245 dev_err(dev, "error getting resources.\n");
1250 /* Allocate net driver data structure */
1251 ndev = alloc_etherdev(sizeof(struct netdata_local));
1253 dev_err(dev, "could not allocate device.\n");
1258 SET_NETDEV_DEV(ndev, dev);
1260 pldat = netdev_priv(ndev);
1264 spin_lock_init(&pldat->lock);
1266 /* Save resources */
1269 /* Get clock for the device */
1270 pldat->clk = clk_get(dev, NULL);
1271 if (IS_ERR(pldat->clk)) {
1272 dev_err(dev, "error getting clock.\n");
1273 ret = PTR_ERR(pldat->clk);
1274 goto err_out_free_dev;
1277 /* Enable network clock */
1278 ret = clk_prepare_enable(pldat->clk);
1280 goto err_out_clk_put;
1283 pldat->net_base = ioremap(res->start, resource_size(res));
1284 if (!pldat->net_base) {
1285 dev_err(dev, "failed to map registers\n");
1287 goto err_out_disable_clocks;
1289 ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
1292 dev_err(dev, "error requesting interrupt.\n");
1293 goto err_out_iounmap;
1296 /* Setup driver functions */
1297 ndev->netdev_ops = &lpc_netdev_ops;
1298 ndev->ethtool_ops = &lpc_eth_ethtool_ops;
1299 ndev->watchdog_timeo = msecs_to_jiffies(2500);
1301 /* Get size of DMA buffers/descriptors region */
1302 pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
1303 sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
1305 if (use_iram_for_net(dev)) {
1306 if (pldat->dma_buff_size >
1307 lpc32xx_return_iram(&pldat->dma_buff_base_v, &dma_handle)) {
1308 pldat->dma_buff_base_v = NULL;
1309 pldat->dma_buff_size = 0;
1311 "IRAM not big enough for net buffers, using SDRAM instead.\n");
1315 if (pldat->dma_buff_base_v == NULL) {
1316 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
1318 goto err_out_free_irq;
1320 pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
1322 /* Allocate a chunk of memory for the DMA ethernet buffers
1324 pldat->dma_buff_base_v =
1325 dma_alloc_coherent(dev,
1326 pldat->dma_buff_size, &dma_handle,
1328 if (pldat->dma_buff_base_v == NULL) {
1330 goto err_out_free_irq;
1333 pldat->dma_buff_base_p = dma_handle;
1335 netdev_dbg(ndev, "IO address space :%pR\n", res);
1336 netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
1337 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
1339 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
1340 netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
1341 netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
1342 pldat->dma_buff_base_p);
1343 netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
1344 pldat->dma_buff_base_v);
1346 /* Get MAC address from current HW setting (POR state is all zeros) */
1347 __lpc_get_mac(pldat, ndev->dev_addr);
1349 if (!is_valid_ether_addr(ndev->dev_addr)) {
1350 const char *macaddr = of_get_mac_address(np);
1351 if (!IS_ERR(macaddr))
1352 ether_addr_copy(ndev->dev_addr, macaddr);
1354 if (!is_valid_ether_addr(ndev->dev_addr))
1355 eth_hw_addr_random(ndev);
1357 /* Reset the ethernet controller */
1358 __lpc_eth_reset(pldat);
1360 /* then shut everything down to save power */
1361 __lpc_eth_shutdown(pldat);
1363 /* Set default parameters */
1364 pldat->msg_enable = NETIF_MSG_LINK;
1366 /* Force an MII interface reset and clock setup */
1367 __lpc_mii_mngt_reset(pldat);
1369 /* Force default PHY interface setup in chip, this will probably be
1370 changed by the PHY driver */
1373 pldat->duplex = DUPLEX_FULL;
1374 __lpc_params_setup(pldat);
1376 netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
1378 ret = register_netdev(ndev);
1380 dev_err(dev, "Cannot register net device, aborting.\n");
1381 goto err_out_dma_unmap;
1383 platform_set_drvdata(pdev, ndev);
1385 ret = lpc_mii_init(pldat);
1387 goto err_out_unregister_netdev;
1389 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
1390 res->start, ndev->irq);
1392 device_init_wakeup(dev, 1);
1393 device_set_wakeup_enable(dev, 0);
1397 err_out_unregister_netdev:
1398 unregister_netdev(ndev);
1400 if (!use_iram_for_net(dev) ||
1401 pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
1402 dma_free_coherent(dev, pldat->dma_buff_size,
1403 pldat->dma_buff_base_v,
1404 pldat->dma_buff_base_p);
1406 free_irq(ndev->irq, ndev);
1408 iounmap(pldat->net_base);
1409 err_out_disable_clocks:
1410 clk_disable_unprepare(pldat->clk);
1412 clk_put(pldat->clk);
1416 pr_err("%s: not found (%d).\n", MODNAME, ret);
1420 static int lpc_eth_drv_remove(struct platform_device *pdev)
1422 struct net_device *ndev = platform_get_drvdata(pdev);
1423 struct netdata_local *pldat = netdev_priv(ndev);
1425 unregister_netdev(ndev);
1427 if (!use_iram_for_net(&pldat->pdev->dev) ||
1428 pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
1429 dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
1430 pldat->dma_buff_base_v,
1431 pldat->dma_buff_base_p);
1432 free_irq(ndev->irq, ndev);
1433 iounmap(pldat->net_base);
1434 mdiobus_unregister(pldat->mii_bus);
1435 mdiobus_free(pldat->mii_bus);
1436 clk_disable_unprepare(pldat->clk);
1437 clk_put(pldat->clk);
1444 static int lpc_eth_drv_suspend(struct platform_device *pdev,
1447 struct net_device *ndev = platform_get_drvdata(pdev);
1448 struct netdata_local *pldat = netdev_priv(ndev);
1450 if (device_may_wakeup(&pdev->dev))
1451 enable_irq_wake(ndev->irq);
1454 if (netif_running(ndev)) {
1455 netif_device_detach(ndev);
1456 __lpc_eth_shutdown(pldat);
1457 clk_disable_unprepare(pldat->clk);
1460 * Reset again now clock is disable to be sure
1463 __lpc_eth_reset(pldat);
1470 static int lpc_eth_drv_resume(struct platform_device *pdev)
1472 struct net_device *ndev = platform_get_drvdata(pdev);
1473 struct netdata_local *pldat;
1475 if (device_may_wakeup(&pdev->dev))
1476 disable_irq_wake(ndev->irq);
1479 if (netif_running(ndev)) {
1480 pldat = netdev_priv(ndev);
1482 /* Enable interface clock */
1483 clk_enable(pldat->clk);
1485 /* Reset and initialize */
1486 __lpc_eth_reset(pldat);
1487 __lpc_eth_init(pldat);
1489 netif_device_attach(ndev);
1497 static const struct of_device_id lpc_eth_match[] = {
1498 { .compatible = "nxp,lpc-eth" },
1501 MODULE_DEVICE_TABLE(of, lpc_eth_match);
1503 static struct platform_driver lpc_eth_driver = {
1504 .probe = lpc_eth_drv_probe,
1505 .remove = lpc_eth_drv_remove,
1507 .suspend = lpc_eth_drv_suspend,
1508 .resume = lpc_eth_drv_resume,
1512 .of_match_table = lpc_eth_match,
1516 module_platform_driver(lpc_eth_driver);
/* Module metadata */
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");