/* SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#define SH_ETH_DEF_MSG_ENABLE \
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
static int sh_eth_is_gether(struct sh_eth_private *mdp)
	if (mdp->reg_offset == sh_eth_offset_gigabit)
static void sh_eth_select_mii(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	default:
		pr_warn("PHY interface mode was not set up; defaulting to MII.\n");

	sh_eth_write(ndev, value, RMII_MII);
static void sh_eth_set_duplex(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex)	/* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else			/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
/* CPU-dependent code follows */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.irq_flags	= IRQF_SHARED,

	.rpadir_value	= 2 << 16,
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
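/* Illustrative arithmetic (not in the original source): the two GETHERC
 * channels sit 0x800 apart, so for port 1 the macros above resolve to
 *   GIGA_MALR(1) = 0xfee00000 + 0x800 + 0x05c8 = 0xfee00dc8
 *   GIGA_MAHR(1) = 0xfee00000 + 0x800 + 0x05c0 = 0xfee00dc0
 */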
static void sh_eth_chip_reset_giga(struct net_device *ndev)
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));

	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
static void sh_eth_set_rate_giga(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.irq_flags	= IRQF_SHARED,

	.rpadir_value	= 2 << 16,
static void sh_eth_chip_reset(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);

static void sh_eth_set_rate_gether(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \

static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \

	.irq_flags	= IRQF_SHARED,
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);

	sh_eth_select_mii(ndev);

static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \

static struct sh_eth_cpu_data sh7619_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

static struct sh_eth_cpu_data sh771x_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
	cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	cd->fdr_value = DEFAULT_FDR_INIT;

	cd->rmcr_value = DEFAULT_RMCR_VALUE;

	cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
static int sh_eth_check_reset(struct net_device *ndev)
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
		pr_err("Device reset failed\n");
static int sh_eth_reset(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,

		ret = sh_eth_check_reset(ndev);

		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
#endif
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
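/* Illustrative example (not from the original source): on a little-endian
 * CPU with mdp->edmac_endian == EDMAC_BIG_ENDIAN, cpu_to_edmac(mdp, 0x12345678)
 * returns the byte-swapped value 0x78563412, while EDMAC_LITTLE_ENDIAN
 * returns the value unchanged.
 */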
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
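/* Worked example (illustrative only): for dev_addr == 00:11:22:33:44:55 the
 * packing above programs MAHR = 0x00112233 and MALR = 0x00004455.
 */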
/* Get MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g); a MAC address must be programmed there before
 * this device is used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	u32 mmd_msk;	/* MMD */
static void bb_set(void *addr, u32 msk)
	iowrite32(ioread32(addr) | msk, addr);

static void bb_clr(void *addr, u32 msk)
	iowrite32((ioread32(addr) & ~msk), addr);

static int bb_read(void *addr, u32 msk)
	return (ioread32(addr) & msk) != 0;
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);

static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);

static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
		sh_eth_set_receive_align(skb);

		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size must be a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
		if (sh_eth_is_gether(mdp))
			sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;

		/* Tx descriptor address set */
		sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
		if (sh_eth_is_gether(mdp))
			sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
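	/* Worked example (illustrative only): for an MTU of 1500 this yields
	 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes per buffer.
	 */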
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
	if (!mdp->rx_ring) {
		goto desc_ring_free;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
	if (!mdp->tx_ring) {
		goto desc_ring_free;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)

		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
		mdp->rx_ring = NULL;

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
		mdp->tx_ring = NULL;
static int sh_eth_dev_init(struct net_device *ndev, bool start)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ret = sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);

	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

		sh_eth_write(ndev, APR_AP, APR);
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	netif_start_queue(ndev);
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;

		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

#if defined(CONFIG_ARCH_R8A7740)
		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740's GETHER, the RFS
		 * bits are from bit 25 to bit 16, so the driver must right-
		 * shift desc_status before checking them.
		 */
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;

			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;

		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size must be a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));

		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE)
			mdp->cur_rx = mdp->dirty_rx =
				(sh_eth_read(ndev, RDFAR) -
				 sh_eth_read(ndev, RDLAR)) >> 4;
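		/* A sketch of the recovery above (illustrative only): each Rx
		 * descriptor is 16 bytes, so the RDFAR - RDLAR byte offset
		 * shifted right by 4 is the index of the descriptor the
		 * controller stopped at:
		 *   index = (RDFAR - RDLAR) / sizeof(struct sh_eth_rxdesc)
		 */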
		sh_eth_write(ndev, EDRRR_R, EDRRR);

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		     ~(ECMR_RE | ECMR_TE), ECMR);

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		     (ECMR_RE | ECMR_TE), ECMR);
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			if (mdp->cd->no_psr || mdp->no_ether_link) {
			link_stat = (sh_eth_read(ndev, PSR));
			if (mdp->ether_link_active_low)
				link_stat = ~link_stat;

			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					     ~DMAC_M_ECI, EESIPR);
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					     DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
	if (intr_status & EESR_TWB) {
		/* Write-back end; unused write-back interrupt */
		if (intr_status & EESR_TABT) /* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Abort\n");

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
			intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

		netif_wake_queue(ndev);
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	unsigned long intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing the ECI interrupt to stay
	 * enabled since it's the one that comes through regardless of the
	 * mask, and we need to fully handle it in sh_eth_error() in order to
	 * quench it as it doesn't get cleared by just writing 1 to the ECI
	 * bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto other_irq;

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			dev_warn(&ndev->dev,
				 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
				 intr_status, intr_enable);
		}
	}

	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
static int sh_eth_poll(struct napi_struct *napi, int budget)
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	unsigned long intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

out:
	return budget - quota;
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

		if (phydev->duplex != mdp->duplex) {
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);

		if (phydev->speed != mdp->speed) {
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);

			sh_eth_write(ndev,
				     (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);

	} else if (mdp->link) {
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		 mdp->mii_bus->id, mdp->phy_id);

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ret = sh_eth_phy_init(ndev);

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);
static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);

	if (ecmd->duplex == DUPLEX_FULL)

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

static int sh_eth_nway_reset(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);
static u32 sh_eth_get_msglevel(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	return mdp->msg_enable;

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mdp->msg_enable = value;

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN	ARRAY_SIZE(sh_eth_gstrings_stats)
static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
		return SH_ETH_STATS_LEN;
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
	switch (stringset) {
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0)
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0)
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats = sh_eth_get_ethtool_stats,
	.get_sset_count	= sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);

	ret = sh_eth_dev_init(ndev, true);

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);

	napi_enable(&mdp->napi);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
			ndev->name, (int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	sh_eth_dev_init(ndev, true);
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;

	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;

	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];

	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
/* device close function */
static int sh_eth_close(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	napi_disable(&mdp->napi);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	phy_stop(mdp->phydev);
	phy_disconnect(mdp->phydev);

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
			   int cmd)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	return phy_mii_ioctl(phydev, rq, cmd);
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);

static u32 sh_eth_tsu_get_post_mask(int entry)
	return 0x0f << (28 - ((entry % 8) * 4));

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
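/* Worked example (illustrative only): for entry 5 the POST nibble is bits
 * 11:8 of TSU_POST1, so the helpers above give
 *   sh_eth_tsu_get_post_mask(5)     == 0x0f << 8
 *   sh_eth_tsu_get_post_bit(mdp, 5) == 0x08 << 8 on port 0 and
 *                                      0x02 << 8 on port 1.
 */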
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port is enabled, return true */
	return tmp & ref_mask;
static int sh_eth_tsu_busy(struct net_device *ndev)
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
			return i;

static int sh_eth_tsu_find_empty(struct net_device *ndev)
	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

	i = sh_eth_tsu_find_entry(ndev, addr);
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2234 struct sh_eth_private *mdp = netdev_priv(ndev);
2240 i = sh_eth_tsu_find_entry(ndev, addr);
2243 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2246 /* Disable the entry if both ports was disabled */
2247 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
static int sh_eth_tsu_purge_all(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);

	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				sh_eth_tsu_purge_mcast(ndev);
				ecmr_bits &= ~ECMR_MCT;

		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and the filtering process
	 * simply accepts all frames.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);
/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
			       GFP_KERNEL);

	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 mdp->pdev->name, id);

	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
					 sizeof(int) * PHY_MAX_ADDR,
					 GFP_KERNEL);
	if (!mdp->mii_bus->irq) {

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);
static const u16 *sh_eth_get_register_offset(int register_type)
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
static int sh_eth_drv_probe(struct platform_device *pdev)
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = pdev->dev.platform_data;
	const struct platform_device_id *id = platform_get_device_id(pdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	ret = platform_get_irq(pdev, 0);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;

	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	sh_eth_set_default_cpu_data(mdp->cd);

	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		/* TSU init (Init only) */
		sh_eth_tsu_init(mdp);
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);

	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

	netif_napi_del(&mdp->napi);
static int sh_eth_drv_remove(struct platform_device *pdev)
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	pm_runtime_disable(&pdev->dev);
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
		.pm = SH_ETH_PM_OPS,
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");