/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/div64.h>

#include "ravb.h"
#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
	"ch0", /* RAVB_BE */
	"ch1", /* RAVB_NC */
};

static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
	"ch18", /* RAVB_BE */
	"ch19", /* RAVB_NC */
};
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}
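/* Poll a register until the bits selected by @mask read back as @value;
 * the loop gives up after 10000 reads (roughly 100 ms with the 10 us
 * delay per iteration) and reports -ETIMEDOUT.
 */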
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}
static int ravb_config(struct net_device *ndev)
{
	int error;

	/* Set config mode */
	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
	/* Check that the operating mode has changed to config mode */
	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
	if (error)
		netdev_err(ndev, "failed to switch device to config mode\n");

	return error;
}
static void ravb_set_duplex(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
}
static void ravb_set_rate(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:	/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:	/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}
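/* Advance skb->data to the next RAVB_ALIGN boundary (128 bytes per ravb.h),
 * which the AVB-DMAC expects for RX buffer addresses.
 */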
static void ravb_set_buffer_align(struct sk_buff *skb)
{
	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, RAVB_ALIGN - reserve);
}
/* Get MAC address from the MAC address registers
 *
 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 * This function gets the MAC address that was set by the bootloader.
 */
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
	if (mac) {
		ether_addr_copy(ndev->dev_addr, mac);
	} else {
		u32 mahr = ravb_read(ndev, MAHR);
		u32 malr = ravb_read(ndev, MALR);

		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
		ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
		ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
		ndev->dev_addr[4] = (malr >>  8) & 0xFF;
		ndev->dev_addr[5] = (malr >>  0) & 0xFF;
	}
}
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}
/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}
/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}
/* MDIO bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};
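/* Note: cur_tx and dirty_tx are free-running counters.  Each frame uses
 * NUM_TX_DESC descriptors, so ring indices below are taken modulo
 * num_tx_ring[q] * NUM_TX_DESC and only every NUM_TX_DESC-th entry has
 * an associated skb.
 */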
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_tx_desc *desc;
	int free_num = 0;
	int entry;
	u32 size;

	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		bool txed;

		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     NUM_TX_DESC);
		desc = &priv->tx_ring[q][entry];
		txed = desc->die_dt == DT_FEMPTY;
		if (free_txed_only && !txed)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
				entry /= NUM_TX_DESC;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				if (txed)
					stats->tx_packets++;
			}
			free_num++;
		}
		if (txed)
			stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int ring_size;
	int i;

	/* Free RX skb ringbuffer */
	if (priv->rx_skb[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++)
			dev_kfree_skb(priv->rx_skb[q][i]);
	}
	kfree(priv->rx_skb[q]);
	priv->rx_skb[q] = NULL;

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	if (priv->rx_ring[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++) {
			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];

			if (!dma_mapping_error(ndev->dev.parent,
					       le32_to_cpu(desc->dptr)))
				dma_unmap_single(ndev->dev.parent,
						 le32_to_cpu(desc->dptr),
						 PKT_BUF_SZ,
						 DMA_FROM_DEVICE);
		}
		ring_size = sizeof(struct ravb_ex_rx_desc) *
			    (priv->num_rx_ring[q] + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
				  priv->rx_desc_dma[q]);
		priv->rx_ring[q] = NULL;
	}

	if (priv->tx_ring[q]) {
		ravb_tx_free(ndev, q, false);

		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}

	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;
}
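/* Each ring below is terminated by a DT_LINKFIX descriptor whose pointer
 * leads back to the ring base, so the AVB-DMAC walks the descriptors as a
 * circular chain.
 */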
/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_ex_rx_desc *rx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
			   NUM_TX_DESC;
	dma_addr_t dma_addr;
	int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	memset(priv->rx_ring[q], 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->rx_ring[q][i];
		rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  PKT_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
	rx_desc = &priv->rx_ring[q][i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		tx_desc++;
		tx_desc->die_dt = DT_EEMPTY;
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	int ring_size;
	int i;

	/* Allocate RX and TX skb rings */
	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->rx_skb[q] || !priv->tx_skb[q])
		goto error;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
		if (!skb)
			goto error;
		ravb_set_buffer_align(skb);
		priv->rx_skb[q][i] = skb;
	}

	/* Allocate rings for the aligned buffers */
	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
				    DPTR_ALIGN - 1, GFP_KERNEL);
	if (!priv->tx_align[q])
		goto error;

	/* Allocate all RX descriptors. */
	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->rx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->rx_ring[q])
		goto error;

	priv->dirty_rx[q] = 0;

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	/* Receive frame limit set register */
	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

	/* PAUSE prohibition */
	ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
		   ECMR_TE | ECMR_RE, ECMR);

	ravb_set_rate(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	/* Set CONFIG mode */
	error = ravb_config(ndev);
	if (error)
		return error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

#if defined(__LITTLE_ENDIAN)
	ravb_modify(ndev, CCC, CCC_BOC, 0);
#else
	ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
#endif

	/* Set AVB RX */
	ravb_write(ndev,
		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt init: */
	if (priv->chip_id == RCAR_GEN3) {
		/* Clear DIL.DPLx */
		ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
	}
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	/* Setting the control will start the AVB-DMAC process. */
	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);

	return 0;
}
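/* Read the timestamps of transmitted frames from the TFA0-TFA2 registers,
 * match them against the queued skbs by tag, and deliver the matching
 * timestamp with skb_tstamp_tx().
 */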
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				break;
			}
		}
		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
	}
}
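/* ravb_rx() processes at most *quota packets; it returns true when the
 * quota was exhausted before the ring ran dry, i.e. when NAPI polling
 * should continue.
 */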
/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
			priv->cur_rx[q];
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct timespec64 ts;
	u8  desc_status;
	u16 pkt_len;
	int limit;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	desc = &priv->rx_ring[q][entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

			skb = priv->rx_skb[q][entry];
			priv->rx_skb[q][entry] = NULL;
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi[q], skb);
			stats->rx_packets++;
			stats->rx_bytes += pkt_len;
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
		desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev,
					       PKT_BUF_SZ + RAVB_ALIGN - 1);
			if (!skb)
				break;	/* Better luck next round. */
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->ds_cc),
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* Wait until the hardware DMA processes have finished */
static int ravb_stop_dma(struct net_device *ndev)
{
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR,
			  TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_config(ndev);
}
/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}
static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	mmiowb();
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~EIS_QFS, EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}
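/* Schedule NAPI for queue @q if its RX or TX interrupt is pending.  On
 * R-Car Gen2 the interrupt is masked by a read-modify-write of RIC0/TIC;
 * Gen3 provides dedicated interrupt-disable registers (RID0/TID), which
 * avoids the non-atomic read-modify-write.
 */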
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis = ravb_read(ndev, TIS);
	u32 tic = ravb_read(ndev, TIC);

	if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
			if (priv->chip_id == RCAR_GEN2) {
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    " tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}
static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		ravb_write(ndev, ~TIS_TFUF, TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		for (q = RAVB_NC; q >= RAVB_BE; q--) {
			if (ravb_queue_interrupt(ndev, q))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}
/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}
static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}
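/* Per the NAPI contract, ravb_poll() returns the number of packets
 * consumed from the budget; a return value below the budget tells the
 * core that this queue is drained and polling may stop.
 */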
static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int quota = budget;
	u32 ris0, tis;

	for (;;) {
		tis = ravb_read(ndev, TIS);
		ris0 = ravb_read(ndev, RIS0);
		if (!((ris0 & mask) || (tis & mask)))
			break;

		/* Processing RX Descriptor Ring */
		if (ris0 & mask) {
			/* Clear RX interrupt */
			ravb_write(ndev, ~mask, RIS0);
			if (ravb_rx(ndev, &quota, q))
				goto out;
		}
		/* Processing TX Descriptor Ring */
		if (tis & mask) {
			spin_lock_irqsave(&priv->lock, flags);
			/* Clear TX interrupt */
			ravb_write(ndev, ~mask, TIS);
			ravb_tx_free(ndev, q, true);
			netif_wake_subqueue(ndev, q);
			mmiowb();
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	napi_complete(napi);

	/* Re-enable RX/TX interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	if (priv->chip_id == RCAR_GEN2) {
		ravb_modify(ndev, RIC0, mask, mask);
		ravb_modify(ndev, TIC,  mask, mask);
	} else {
		ravb_write(ndev, mask, RIE0);
		ravb_write(ndev, mask, TIE);
	}
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
	return budget - quota;
}
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;

	if (phydev->link) {
		if (phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			ravb_set_rate(ndev);
		}
		if (!priv->link) {
			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = true;
			priv->link = phydev->link;
			if (priv->no_avb_link)
				ravb_rcv_snd_enable(ndev);
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		priv->duplex = -1;
		if (priv->no_avb_link)
			ravb_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}
/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev;
	struct device_node *pn;
	int err;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		if (of_phy_is_fixed_link(np)) {
			err = of_phy_register_fixed_link(np);
			if (err)
				return err;
		}
		pn = of_node_get(np);
	}
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
				priv->phy_interface);
	of_node_put(pn);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		err = -ENOENT;
		goto err_deregister_fixed_link;
	}

	/* This driver only supports 10/100 Mbit/s speeds on Gen3 H/W
	 * at this time.
	 */
	if (priv->chip_id == RCAR_GEN3) {
		err = phy_set_max_speed(phydev, SPEED_100);
		if (err) {
			netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
			goto err_phy_disconnect;
		}

		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
	}

	/* 10BASE is not supported */
	phydev->supported &= ~PHY_10BT_FEATURES;

	phy_attached_info(phydev);

	return 0;

err_phy_disconnect:
	phy_disconnect(phydev);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	return err;
}
/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(ndev->phydev);

	return 0;
}
static int ravb_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (ndev->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_ethtool_ksettings_get(ndev->phydev, cmd);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}
static int ravb_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int error;

	if (!ndev->phydev)
		return -ENODEV;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX */
	ravb_rcv_snd_disable(ndev);

	error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
	if (error)
		goto error_exit;

	if (cmd->base.duplex == DUPLEX_FULL)
		priv->duplex = 1;
	else
		priv->duplex = 0;

	ravb_set_duplex(ndev);

error_exit:
	mdelay(1);

	/* Enable TX and RX */
	ravb_rcv_snd_enable(ndev);

	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}
static int ravb_nway_reset(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (ndev->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_start_aneg(ndev->phydev);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}
static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};

#define RAVB_STATS_LEN	ARRAY_SIZE(ravb_gstrings_stats)
static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return RAVB_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int i = 0;
	int q;

	/* Device-specific stats */
	for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}
static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
		break;
	}
}
static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}
static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		if (priv->chip_id == RCAR_GEN2)
			ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Are any AVB processes still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		if (priv->chip_id == RCAR_GEN2)
			ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}
static int ravb_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_ALL);
	info->phc_index = ptp_clock_index(priv->ptp.clock);

	return 0;
}
static const struct ethtool_ops ravb_ethtool_ops = {
	.nway_reset = ravb_nway_reset,
	.get_msglevel = ravb_get_msglevel,
	.set_msglevel = ravb_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = ravb_get_strings,
	.get_ethtool_stats = ravb_get_ethtool_stats,
	.get_sset_count = ravb_get_sset_count,
	.get_ringparam = ravb_get_ringparam,
	.set_ringparam = ravb_set_ringparam,
	.get_ts_info = ravb_get_ts_info,
	.get_link_ksettings = ravb_get_link_ksettings,
	.set_link_ksettings = ravb_set_link_ksettings,
};
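/* Request a named IRQ; the devm-allocated name combines the netdev name
 * with the channel string (e.g. "eth0:ch22:multi") so that the Gen3
 * per-queue vectors are distinguishable in /proc/interrupts.
 */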
static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
				struct net_device *ndev, struct device *dev,
				const char *ch)
{
	char *name;
	int error;

	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
	if (!name)
		return -ENOMEM;
	error = request_irq(irq, handler, 0, name, ndev);
	if (error)
		netdev_err(ndev, "cannot request IRQ %s\n", name);

	return error;
}
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	napi_enable(&priv->napi[RAVB_NC]);

	if (priv->chip_id == RCAR_GEN2) {
		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
				    ndev->name, ndev);
		if (error) {
			netdev_err(ndev, "cannot request IRQ\n");
			goto out_napi_off;
		}
	} else {
		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
				      dev, "ch22:multi");
		if (error)
			goto out_napi_off;
		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
				      dev, "ch24:emac");
		if (error)
			goto out_free_irq;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch0:rx_be");
		if (error)
			goto out_free_irq_emac;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch18:tx_be");
		if (error)
			goto out_free_irq_be_rx;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch1:rx_nc");
		if (error)
			goto out_free_irq_be_tx;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch19:tx_nc");
		if (error)
			goto out_free_irq_nc_rx;
	}

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_free_irq_nc_tx;
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);

	/* PHY control start */
	error = ravb_phy_start(ndev);
	if (error)
		goto out_ptp_stop;

	return 0;

out_ptp_stop:
	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);
out_free_irq_nc_tx:
	if (priv->chip_id == RCAR_GEN2)
		goto out_free_irq;
	free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
	free_irq(priv->rx_irqs[RAVB_NC], ndev);
out_free_irq_be_tx:
	free_irq(priv->tx_irqs[RAVB_BE], ndev);
out_free_irq_be_rx:
	free_irq(priv->rx_irqs[RAVB_BE], ndev);
out_free_irq_emac:
	free_irq(priv->emac_irq, ndev);
out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
}
/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	netif_err(priv, tx_err, ndev,
		  "transmit timed out, status %08x, resetting...\n",
		  ravb_read(ndev, ISS));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	schedule_work(&priv->work);
}
static void ravb_tx_timeout_work(struct work_struct *work)
{
	struct ravb_private *priv = container_of(work, struct ravb_private,
						 work);
	struct net_device *ndev = priv->ndev;

	netif_tx_stop_all_queues(ndev);

	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);

	/* Wait for DMA stopping */
	ravb_stop_dma(ndev);

	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
	ravb_dmac_init(ndev);
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);
}
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u16 q = skb_get_queue_mapping(skb);
	struct ravb_tstamp_skb *ts_skb;
	struct ravb_tx_desc *desc;
	unsigned long flags;
	dma_addr_t dma_addr;
	void *buffer;
	u32 entry;
	u32 len;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
	    NUM_TX_DESC) {
		netif_err(priv, tx_queued, ndev,
			  "still transmitting with the full ring!\n");
		netif_stop_subqueue(ndev, q);
		spin_unlock_irqrestore(&priv->lock, flags);
		return NETDEV_TX_BUSY;
	}
	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;

	if (skb_put_padto(skb, ETH_ZLEN)) {
		/* padto frees the skb itself; drop the stale pointer */
		priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
		goto exit;
	}
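	/* Each TX entry owns a DPTR_ALIGN-sized slot in the tx_align bounce
	 * buffer: the unaligned head of the packet is copied there and sent
	 * via the first descriptor, while the (now aligned) remainder is
	 * mapped directly from the skb via the second descriptor.
	 */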
	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
		 entry / NUM_TX_DESC * DPTR_ALIGN;
	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
	/* Zero length DMA descriptors are problematic as they seem to
	 * terminate DMA transfers. Avoid them by simply using a length of
	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
	 *
	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
	 * data by the call to skb_put_padto() above this is safe with
	 * respect to both the length of the first DMA descriptor (len)
	 * overflowing the available data and the length of the second DMA
	 * descriptor (skb->len - len) being negative.
	 */
	if (len == 0)
		len = DPTR_ALIGN;

	memcpy(buffer, skb->data, len);
	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr))
		goto drop;

	desc = &priv->tx_ring[q][entry];
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	buffer = skb->data + len;
	len = skb->len - len;
	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr))
		goto unmap;

	desc++;
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	/* TX timestamp required */
	if (q == RAVB_NC) {
		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
		if (!ts_skb) {
			desc--;
			dma_unmap_single(ndev->dev.parent, dma_addr, len,
					 DMA_TO_DEVICE);
			goto unmap;
		}
		ts_skb->skb = skb;
		ts_skb->tag = priv->ts_skb_tag++;
		priv->ts_skb_tag &= 0x3ff;
		list_add_tail(&ts_skb->list, &priv->ts_skb_list);

		/* TAG and timestamp required flag */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
		desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
	}

	skb_tx_timestamp(skb);
	/* Descriptor type must be set after all the above writes */
	dma_wmb();
	desc->die_dt = DT_FEND;
	desc--;
	desc->die_dt = DT_FSTART;

	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);

	priv->cur_tx[q] += NUM_TX_DESC;
	if (priv->cur_tx[q] - priv->dirty_tx[q] >
	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
	    !ravb_tx_free(ndev, q, true))
		netif_stop_subqueue(ndev, q);

exit:
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
	return NETDEV_TX_OK;

unmap:
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
	dev_kfree_skb_any(skb);
	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
	goto exit;
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/* If skb needs TX timestamp, it is handled in network control queue */
	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
							       RAVB_BE;
}
static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *nstats, *stats0, *stats1;

	nstats = &ndev->stats;
	stats0 = &priv->stats[RAVB_BE];
	stats1 = &priv->stats[RAVB_NC];

	nstats->tx_dropped += ravb_read(ndev, TROCR);
	ravb_write(ndev, 0, TROCR);	/* (write clear) */
	nstats->collisions += ravb_read(ndev, CDCR);
	ravb_write(ndev, 0, CDCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
	ravb_write(ndev, 0, LCCR);	/* (write clear) */

	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
	ravb_write(ndev, 0, CERCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
	ravb_write(ndev, 0, CEECR);	/* (write clear) */

	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
	nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
	nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
	nstats->multicast = stats0->multicast + stats1->multicast;
	nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
	nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
	nstats->rx_frame_errors =
		stats0->rx_frame_errors + stats1->rx_frame_errors;
	nstats->rx_length_errors =
		stats0->rx_length_errors + stats1->rx_length_errors;
	nstats->rx_missed_errors =
		stats0->rx_missed_errors + stats1->rx_missed_errors;
	nstats->rx_over_errors =
		stats0->rx_over_errors + stats1->rx_over_errors;

	return nstats;
}
/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ravb_modify(ndev, ECMR, ECMR_PRM,
		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;

	netif_tx_stop_all_queues(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);

	/* Set the config mode to stop the AVB-DMAC's processes */
	if (ravb_stop_dma(ndev) < 0)
		netdev_err(ndev,
			   "device will be stopped after h/w processes are done.\n");

	/* Clear the timestamp list */
	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
		list_del(&ts_skb->list);
		kfree(ts_skb);
	}

	/* PHY disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
		if (of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
	}

	if (priv->chip_id != RCAR_GEN2) {
		free_irq(priv->tx_irqs[RAVB_NC], ndev);
		free_irq(priv->rx_irqs[RAVB_NC], ndev);
		free_irq(priv->tx_irqs[RAVB_BE], ndev);
		free_irq(priv->rx_irqs[RAVB_BE], ndev);
		free_irq(priv->emac_irq, ndev);
	}
	free_irq(ndev->irq, ndev);

	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	return 0;
}
static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						HWTSTAMP_TX_OFF;
	if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
	else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	else
		config.rx_filter = HWTSTAMP_FILTER_NONE;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	/* Reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
	}

	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ravb_hwtstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return ravb_hwtstamp_set(ndev, req);
	}

	return phy_mii_ioctl(phydev, req, cmd);
}
static const struct net_device_ops ravb_netdev_ops = {
	.ndo_open = ravb_open,
	.ndo_stop = ravb_close,
	.ndo_start_xmit = ravb_start_xmit,
	.ndo_select_queue = ravb_select_queue,
	.ndo_get_stats = ravb_get_stats,
	.ndo_set_rx_mode = ravb_set_rx_mode,
	.ndo_tx_timeout = ravb_tx_timeout,
	.ndo_do_ioctl = ravb_do_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	/* Bitbang init */
	priv->mdiobb.ops = &bb_ops;

	/* MII controller setting */
	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
	if (!priv->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	priv->mii_bus->name = "ravb_mii";
	priv->mii_bus->parent = dev;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register MDIO bus */
	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
	if (error)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(priv->mii_bus);
	return error;
}
/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
	/* Unregister mdio bus */
	mdiobus_unregister(priv->mii_bus);

	/* Free bitbang info */
	free_mdio_bitbang(priv->mii_bus);

	return 0;
}
static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
	{ .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
	{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
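/* Program the gPTP timer increment (GTI.TIV): (10^9 << 20) / rate is the
 * number of nanoseconds per functional-clock tick, expressed in 20-bit
 * fixed point as the gPTP timer expects.
 */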
static int ravb_set_gti(struct net_device *ndev)
{
	struct device *dev = ndev->dev.parent;
	struct device_node *np = dev->of_node;
	unsigned long rate;
	struct clk *clk;
	uint64_t inc;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		dev_err(dev, "could not get clock\n");
		return PTR_ERR(clk);
	}

	rate = clk_get_rate(clk);
	clk_put(clk);

	if (!rate)
		return -EINVAL;

	inc = 1000000000ULL << 20;
	do_div(inc, rate);

	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
			inc, GTI_TIV_MIN, GTI_TIV_MAX);
		return -EINVAL;
	}

	ravb_write(ndev, inc, GTI);

	return 0;
}
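/* Switch the AVB-DMAC to config mode.  On Gen3, CCC_GAC keeps the gPTP
 * timer active across operating-mode changes, which is why the PTP clock
 * is initialised only once, at probe time, on those SoCs.
 */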
static void ravb_set_config_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	if (priv->chip_id == RCAR_GEN2) {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
		/* Set CSEL value */
		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
	} else {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
			    CCC_GAC | CCC_CSEL_HPB);
	}
}
static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ravb_private *priv;
	enum ravb_chip_id chip_id;
	struct net_device *ndev;
	int error, irq, q;
	struct resource *res;
	int i;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	/* Get base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);

	if (chip_id == RCAR_GEN3)
		irq = platform_get_irq_byname(pdev, "ch22");
	else
		irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		error = irq;
		goto out_release;
	}
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_release;
	}

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	priv->phy_interface = of_get_phy_mode(np);

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	if (chip_id == RCAR_GEN3) {
		irq = platform_get_irq_byname(pdev, "ch24");
		if (irq < 0) {
			error = irq;
			goto out_release;
		}
		priv->emac_irq = irq;
		for (i = 0; i < NUM_RX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->rx_irqs[i] = irq;
		}
		for (i = 0; i < NUM_TX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->tx_irqs[i] = irq;
		}
	}

	priv->chip_id = chip_id;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	/* Set GTI value */
	error = ravb_set_gti(ndev);
	if (error)
		goto out_release;

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_release;
	}
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Initialise PTP Clock driver */
	if (chip_id != RCAR_GEN2)
		ravb_ptp_init(ndev, pdev);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address */
	ravb_read_mac_address(ndev, of_get_mac_address(np));
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Stop PTP Clock driver */
	if (chip_id != RCAR_GEN2)
		ravb_ptp_stop(ndev);
out_release:
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return error;
}
static int ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);

	/* Stop PTP Clock driver */
	if (priv->chip_id != RCAR_GEN2)
		ravb_ptp_stop(ndev);

	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
	/* Set reset mode */
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	pm_runtime_put_sync(&pdev->dev);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static int __maybe_unused ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = ravb_close(ndev);
	}

	return ret;
}
static int __maybe_unused ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret = 0;

	/* All registers have been reset to default values.
	 * Restore all registers which were set up at probe time and
	 * reopen device if it was running before system suspended.
	 */

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	/* Set GTI value */
	ret = ravb_set_gti(ndev);
	if (ret)
		return ret;

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	/* Restore descriptor base address table */
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	if (netif_running(ndev)) {
		ret = ravb_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}
static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};
static struct platform_driver ravb_driver = {
	.probe = ravb_probe,
	.remove = ravb_remove,
	.driver = {
		.name = "ravb",
		.pm = &ravb_dev_pm_ops,
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");