/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16
/* Ring sizes that are powers of two allow for more efficient modulo
 * calculations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128
/* Allow 8 extra bytes for a VLAN tag and the FCS, on top of the
 * Ethernet header.
 */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		/* Length of the buffer/packet in bytes */
		/* For TX, signals that the packet should be timestamped */
		/* The RX error code */
		/* Physical address of the buffer */
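
/* Offsets of the MIX block CSRs, used relative to the per-port base
 * mapping held in p->mix.
 */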
#define MIX_ORING1	0x0
#define MIX_ORING2	0x8
#define MIX_IRING1	0x10
#define MIX_IRING2	0x18
#define MIX_IRHWM	0x28
#define MIX_IRCNT	0x30
#define MIX_ORHWM	0x38
#define MIX_ORCNT	0x40
#define MIX_INTENA	0x50
#define MIX_REMCNT	0x58
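
/* Offsets of the per-port AGL (management GMX) CSRs, used relative to
 * the base mapping held in p->agl.
 */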
#define AGL_GMX_PRT_CFG			0x10
#define AGL_GMX_RX_FRM_CTL		0x18
#define AGL_GMX_RX_FRM_MAX		0x30
#define AGL_GMX_RX_JABBER		0x38
#define AGL_GMX_RX_STATS_CTL		0x50
#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0
#define AGL_GMX_RX_ADR_CTL		0x100
#define AGL_GMX_RX_ADR_CAM_EN		0x108
#define AGL_GMX_RX_ADR_CAM0		0x180
#define AGL_GMX_RX_ADR_CAM1		0x188
#define AGL_GMX_RX_ADR_CAM2		0x190
#define AGL_GMX_RX_ADR_CAM3		0x198
#define AGL_GMX_RX_ADR_CAM4		0x1a0
#define AGL_GMX_RX_ADR_CAM5		0x1a8
#define AGL_GMX_TX_CLK			0x208
#define AGL_GMX_TX_STATS_CTL		0x268
#define AGL_GMX_TX_CTL			0x270
#define AGL_GMX_TX_STAT0		0x280
#define AGL_GMX_TX_STAT1		0x288
#define AGL_GMX_TX_STAT2		0x290
#define AGL_GMX_TX_STAT3		0x298
#define AGL_GMX_TX_STAT4		0x2a0
#define AGL_GMX_TX_STAT5		0x2a8
#define AGL_GMX_TX_STAT6		0x2b0
#define AGL_GMX_TX_STAT7		0x2b8
#define AGL_GMX_TX_STAT8		0x2c0
#define AGL_GMX_TX_STAT9		0x2c8

	struct net_device *netdev;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
	union cvmx_mixx_intena mix_intena;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
	union cvmx_mixx_intena mix_intena;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
	octeon_mgmt_set_rx_irq(p, 1);

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
	octeon_mgmt_set_rx_irq(p, 0);

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
	octeon_mgmt_set_tx_irq(p, 1);

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
	octeon_mgmt_set_tx_irq(p, 0);
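
/* Number of entries software allows itself to put in a ring; eight
 * entries are always left unused.
 */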
static unsigned int ring_max_fill(unsigned int ring_size)
	return ring_size - 8;

static unsigned int ring_size_to_bytes(unsigned int ring_size)
	return ring_size * sizeof(union mgmt_port_ring_entry);

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		union mgmt_port_ring_entry re;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.s.addr = dma_map_single(p->dev, skb->data,

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
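		/* Ring the doorbell: tell the hardware that one more RX
		 * buffer has been added to the input ring.
		 */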
		cvmx_write_csr(p->mix + MIX_IRING2, 1);

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),

		re.d64 = p->tx_ring[p->tx_next_clean];
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);

		dev_kfree_skb_any(skb);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
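
/* Bottom half for TX completion.  The interrupt handler masks the TX
 * interrupt and schedules this tasklet; it cleans the finished buffers
 * and then re-enables the interrupt.
 */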
static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);

	/* These reads also clear the count registers.  */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	/* Do an atomic update.  */
	spin_lock_irqsave(&p->lock, flags);
	netdev->stats.rx_errors += bad;
	netdev->stats.rx_dropped += drop;
	spin_unlock_irqrestore(&p->lock, flags);

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers.  */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update.  */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up.  */
		skb_put(skb, re.s.len);

		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);

		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * case.
		 */
		skb_put(skb, re.s.len);
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		/* Some other error, discard it.  */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	/* Discard the whole mess.  */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	netdev->stats.rx_errors++;

	/* Tell the hardware we processed a packet.  */
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {
		rc = octeon_mgmt_receive_one(p);
		/* Check for more packets.  */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);

	octeon_mgmt_rx_fill_ring(p->netdev);

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available.  */
		napi_complete_done(napi, work_done);
		octeon_mgmt_enable_rx_irq(p);
	octeon_mgmt_update_rx_stats(netdev);

/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
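
/* Shadow of the six AGL_GMX_RX_ADR_CAM registers while the RX filter is
 * being rebuilt: cam[i] collects byte i of each accepted MAC address
 * (one byte lane per CAM entry) and cam_mask enables the entries that
 * were populated.
 */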
struct octeon_mgmt_cam_state {

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		available_cam_entries = 8;
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
			multicast_mode = 0; /* 0 - Use CAM.  */

		/* Add primary address.  */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O.  */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O.  */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
	int r = eth_mac_addr(netdev, addr);

	octeon_mgmt_set_rx_filtering(netdev);

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
	struct octeon_mgmt *p = netdev_priv(netdev);
	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	netdev->mtu = new_mtu;
	/* HW lifts the limit if the frame is VLAN tagged
	 * (+4 bytes per tag, up to two tags).
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
	/* Set the hardware to truncate packets larger than the MTU.  The
	 * jabber register must be set to a multiple of 8 bytes, so round up.
	 * JABBER is an unconditional limit, so we need to account for two
	 * possible VLAN headers.
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
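	/* Example: new_mtu = 1500 gives max_packet = 1518 and a jabber
	 * limit of (1518 + 7 + 8) & 0xfff8 = 1528 bytes.
	 */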
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);

static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))

	if (config.flags) /* reserved for future extensions */
	/* Check whether the hardware supports timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
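			/* clock_comp holds nanoseconds per I/O clock tick as
			 * a 32.32 fixed-point value (hence the shift by 32).
			 */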
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
				 "PTP Clock using sclk reference @ %lldHz\n",
				 (NSEC_PER_SEC << 32) / clock_comp);
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
				 "PTP Clock using GPIO%d @ %lld Hz\n",
				 ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);

		/* Enable the clock if it wasn't done already */
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		have_hw_timestamps = true;

	if (!have_hw_timestamps)

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
		return phy_mii_ioctl(netdev->phydev, rq, cmd);

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes.  */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state only if link is set */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

static void octeon_mgmt_update_link(struct octeon_mgmt *p)
	struct net_device *ndev = p->netdev;
	struct phy_device *phydev = ndev->phydev;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
		prtx_cfg.s.duplex = 1;
		prtx_cfg.s.duplex = phydev->duplex;
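	/* Speed encoding used below: 10 Mbit/s uses speed = 0 with
	 * speed_msb = 1, 100 Mbit/s uses speed = 0 with speed_msb = 0,
	 * and 1000 Mbit/s (CN6XXX only) uses speed = 1 with speed_msb = 0.
	 */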
	switch (phydev->speed) {
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		/* 1000 Mbit/s is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = phydev->duplex;
	case 0:  /* No link */

	/* Write the new GMX setting with the port still disabled.  */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed.  */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed.  */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);

static void octeon_mgmt_adjust_link(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);

	if (!phydev->link && p->last_link)
	    (p->last_duplex != phydev->duplex ||
	     p->last_link != phydev->link ||
	     p->last_speed != phydev->speed)) {
		octeon_mgmt_disable_link(p);
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);

	p->last_link = phydev->link;
	p->last_speed = phydev->speed;
	p->last_duplex = phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0)
			netdev_info(netdev, "Link is up - %d/%s\n",
				    phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
			netdev_info(netdev, "Link is down\n");

static int octeon_mgmt_init_phy(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator.  */
		netif_carrier_on(netdev);

	phydev = of_phy_connect(netdev, p->phy_np,
				octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

static int octeon_mgmt_open(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed.  */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
			drv_ctl.s.byp_en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);

	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW.  Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface.  */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);

	/* Set the mode of the interface, RGMII/MII.  */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
			(linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
					   netdev->phydev->supported) |
			 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
					   netdev->phydev->supported)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* MII clock counts are based on the 125 MHz
		 * reference, which has an 8 ns period.  So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock.  External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait.  */
		cvmx_read_csr(p->agl_prt_ctl);
		/* Wait for the compensation state to lock.  */
		ndelay(1040 * NS_PER_PHY_CLK);
		/* Default Interframe Gaps are too small.  Recommended
		 * values are:
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
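		/* 0xae below packs those two nibbles: IFG2 = 0xa (10) and
		 * IFG1 = 0xe (14).
		 */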
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics.  */
	/* Clear on read.  */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);

	/* Interrupt every single RX packet */
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O.  */
	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);
	/* The PHY is not present in the simulator.  The carrier was enabled
	 * while initializing the PHY for the simulator; leave it enabled.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(netdev->phydev);

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),

static int octeon_mgmt_stop(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

		phy_disconnect(netdev->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),

octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	netdev_tx_t rv = NETDEV_TX_BUSY;

	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.  */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netif_trans_update(netdev);
	octeon_mgmt_update_tx_stats(netdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));

static int octeon_mgmt_nway_reset(struct net_device *dev)
	if (!capable(CAP_NET_ADMIN))

		return phy_start_aneg(dev->phydev);

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open =			octeon_mgmt_open,
	.ndo_stop =			octeon_mgmt_stop,
	.ndo_start_xmit =		octeon_mgmt_xmit,
	.ndo_set_rx_mode =		octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
	.ndo_do_ioctl =			octeon_mgmt_ioctl,
	.ndo_change_mtu =		octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		octeon_mgmt_poll_controller,

static int octeon_mgmt_probe(struct platform_device *pdev)
	struct net_device *netdev;
	struct octeon_mgmt *p;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));

	SET_NETDEV_DEV(netdev, &pdev->dev);

	platform_set_drvdata(pdev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
		dev_err(&pdev->dev, "no 'cell-index' property\n");

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");

	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);

	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);

	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	if (!p->mix || !p->agl || !p->agl_prt_ctl) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");

	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;
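	/* Both bounds are frame-size limits (64-byte minimum Ethernet
	 * frame, 16383-byte maximum) expressed minus the header, FCS
	 * and VLAN overhead counted in OCTEON_MGMT_RX_HEADROOM.
	 */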
	mac = of_get_mac_address(pdev->dev.of_node);

		memcpy(netdev->dev_addr, mac, ETH_ALEN);
		eth_hw_addr_random(netdev);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	netif_carrier_off(netdev);
	result = register_netdev(netdev);

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");

	of_node_put(p->phy_np);
	free_netdev(netdev);

static int octeon_mgmt_remove(struct platform_device *pdev)
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct octeon_mgmt *p = netdev_priv(netdev);

	unregister_netdev(netdev);
	of_node_put(p->phy_np);
	free_netdev(netdev);
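
/* Illustrative (not normative) shape of the device-tree node this driver
 * binds to, based only on the properties and resources consumed by
 * octeon_mgmt_probe() above.  Names and register/interrupt values are
 * placeholders; the "reg" ranges include the MIX, AGL and AGL_PRT_CTL
 * windows requested above.
 *
 *	mgmt0: ethernet@<addr> {
 *		compatible = "cavium,octeon-5750-mix";
 *		reg = <...>;
 *		cell-index = <0>;
 *		interrupts = <...>;
 *		phy-handle = <&phy0>;
 *	};
 */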
static const struct of_device_id octeon_mgmt_match[] = {
		.compatible = "cavium,octeon-5750-mix",
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
		.name		= "octeon_mgmt",
		.of_match_table = octeon_mgmt_match,
	.probe		= octeon_mgmt_probe,
	.remove		= octeon_mgmt_remove,

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
	/* Force our mdiobus driver module to be loaded first.  */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);

static void __exit octeon_mgmt_mod_exit(void)
	platform_driver_unregister(&octeon_mgmt_driver);

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);