/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
        return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
                                      unsigned int usec)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_usec_to_riwt\n");

        rate = pdata->sysclk_rate;

        /* Convert the input usec value to the watchdog timer value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   (usec * (system_clock_hz / 10^6)) / 256
         */
        ret = (usec * (rate / 1000000)) / 256;

        DBGPR("<--xgbe_usec_to_riwt\n");

        return ret;
}
static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
                                      unsigned int riwt)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_riwt_to_usec\n");

        rate = pdata->sysclk_rate;

        /* Convert the input watchdog timer value to the usec value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   (riwt * 256) / (system_clock_hz / 10^6)
         */
        ret = (riwt * 256) / (rate / 1000000);

        DBGPR("<--xgbe_riwt_to_usec\n");

        return ret;
}
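/* Worked example (same assumed 125 MHz clock): riwt = 48 converts back to
 * (48 * 256) / 125 = 98 usec; integer division makes the round trip slightly
 * lossy (100 usec in, 98 usec out).
 */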
static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
        unsigned int pblx8, pbl;
        unsigned int i;

        pblx8 = DMA_PBL_X8_DISABLE;
        pbl = pdata->pbl;

        if (pdata->pbl > 32) {
                pblx8 = DMA_PBL_X8_ENABLE;
                pbl >>= 3;
        }

        for (i = 0; i < pdata->channel_count; i++) {
                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
                                       pblx8);

                if (pdata->channel[i]->tx_ring)
                        XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
                                               PBL, pbl);

                if (pdata->channel[i]->rx_ring)
                        XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
                                               PBL, pbl);
        }

        return 0;
}
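/* Example of the PBL scaling above (illustrative value): pdata->pbl = 64
 * exceeds 32, so PBLX8 is enabled and the register PBL field is written as
 * 64 >> 3 = 8; the hardware multiplies it back out (8 x 8 = 64 beats).
 */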
static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        continue;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
                                       pdata->tx_osp_mode);
        }

        return 0;
}
static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

        return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

        return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

        return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

        return 0;
}
static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        continue;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
                                       pdata->rx_riwt);
        }

        return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
        return 0;
}
static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        continue;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
                                       pdata->rx_buf_size);
        }
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        continue;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
        }
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        continue;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
        }

        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
                              unsigned int index, unsigned int val)
{
        unsigned int wait;
        int ret = 0;

        mutex_lock(&pdata->rss_mutex);

        if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
                ret = -EBUSY;
                goto unlock;
        }

        XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

        wait = 1000;
        while (wait--) {
                if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
                        goto unlock;

                usleep_range(1000, 1500);
        }

        ret = -EBUSY;

unlock:
        mutex_unlock(&pdata->rss_mutex);

        return ret;
}
static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
        unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
        unsigned int *key = (unsigned int *)&pdata->rss_key;
        int ret;

        while (key_regs--) {
                ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
                                         key_regs, *key++);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
        unsigned int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
                ret = xgbe_write_rss_reg(pdata,
                                         XGBE_RSS_LOOKUP_TABLE_TYPE, i,
                                         pdata->rss_table[i]);
                if (ret)
                        return ret;
        }

        return 0;
}
static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
        memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

        return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
                                     const u32 *table)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
                XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

        return xgbe_write_rss_lookup_table(pdata);
}
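/* Each lookup table entry's DMCH field selects the DMA channel that receives
 * packets whose RSS hash lands in that bucket; e.g. table[i] = i % channel
 * count spreads buckets evenly (illustrative policy only; the actual table
 * contents come from the caller, typically ethtool).
 */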
static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        /* Program the hash key */
        ret = xgbe_write_rss_hash_key(pdata);
        if (ret)
                return ret;

        /* Program the lookup table */
        ret = xgbe_write_rss_lookup_table(pdata);
        if (ret)
                return ret;

        /* Set the RSS options */
        XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

        /* Enable RSS */
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

        return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

        return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return;

        if (pdata->netdev->features & NETIF_F_RXHASH)
                ret = xgbe_enable_rss(pdata);
        else
                ret = xgbe_disable_rss(pdata);

        if (ret)
                netdev_err(pdata->netdev,
                           "error configuring RSS, RSS disabled\n");
}
static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
                              unsigned int queue)
{
        unsigned int prio, tc;

        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                /* Does this queue handle the priority? */
                if (pdata->prio2q_map[prio] != queue)
                        continue;

                /* Get the Traffic Class for this priority */
                tc = pdata->ets->prio_tc[prio];

                /* Check if PFC is enabled for this traffic class */
                if (pdata->pfc->pfc_en & (1 << tc))
                        return true;
        }

        return false;
}
static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
{
        /* Program the VXLAN port */
        XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);

        netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
                  pdata->vxlan_port);
}

static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.vxn)
                return;

        /* Program the VXLAN port */
        xgbe_set_vxlan_id(pdata);

        /* Allow for IPv6/UDP zero-checksum VXLAN packets */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);

        /* Enable VXLAN tunneling mode */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);

        netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
}

static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.vxn)
                return;

        /* Disable tunneling mode */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);

        /* Clear IPv6/UDP zero-checksum VXLAN packets setting */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);

        /* Clear the VXLAN port */
        XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);

        netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Clear MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}
static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;
        struct ieee_ets *ets = pdata->ets;
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Set MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++) {
                unsigned int ehfc = 0;

                if (pdata->rx_rfd[i]) {
                        /* Flow control thresholds are established */
                        if (pfc && ets) {
                                if (xgbe_is_pfc_queue(pdata, i))
                                        ehfc = 1;
                        } else {
                                ehfc = 1;
                        }
                }

                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

                netif_dbg(pdata, drv, pdata->netdev,
                          "flow control %s for RXq%u\n",
                          ehfc ? "enabled" : "disabled", i);
        }

        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);

                /* Enable transmit flow control */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

                /* Set pause time */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}
static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

        return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

        return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->tx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_tx_flow_control(pdata);
        else
                xgbe_disable_tx_flow_control(pdata);

        return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->rx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_rx_flow_control(pdata);
        else
                xgbe_disable_rx_flow_control(pdata);

        return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        xgbe_config_tx_flow_control(pdata);
        xgbe_config_rx_flow_control(pdata);

        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
                           (pfc && pfc->pfc_en) ? 1 : 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Set the interrupt mode if supported */
        if (pdata->channel_irq_mode)
                XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
                                   pdata->channel_irq_mode);

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];

                /* Clear all the interrupts which are set */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
                                  XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

                /* Clear all interrupt enable bits */
                channel->curr_ier = 0;

                /* Enable following interrupts
                 *   NIE  - Normal Interrupt Summary Enable
                 *   AIE  - Abnormal Interrupt Summary Enable
                 *   FBEE - Fatal Bus Error Enable
                 */
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);

                if (channel->tx_ring) {
                        /* Enable the following Tx interrupts
                         *   TIE - Transmit Interrupt Enable (unless using
                         *         per channel interrupts in edge triggered
                         *         mode)
                         */
                        if (!pdata->per_channel_irq || pdata->channel_irq_mode)
                                XGMAC_SET_BITS(channel->curr_ier,
                                               DMA_CH_IER, TIE, 1);
                }
                if (channel->rx_ring) {
                        /* Enable following Rx interrupts
                         *   RBUE - Receive Buffer Unavailable Enable
                         *   RIE  - Receive Interrupt Enable (unless using
                         *          per channel interrupts in edge triggered
                         *          mode)
                         */
                        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
                        if (!pdata->per_channel_irq || pdata->channel_irq_mode)
                                XGMAC_SET_BITS(channel->curr_ier,
                                               DMA_CH_IER, RIE, 1);
                }

                XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
        }
}
static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mtl_q_isr;
        unsigned int q_count, i;

        q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
        for (i = 0; i < q_count; i++) {
                /* Clear all the interrupts which are set */
                mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

                /* No MTL interrupts to be enabled */
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
}
static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mac_ier = 0;

        /* Enable Timestamp interrupt */
        XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

        /* Enable all counter interrupts */
        XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
        XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

        /* Enable MDIO single command completion interrupt */
        XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}
static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int ecc_isr, ecc_ier = 0;

        if (!pdata->vdata->ecc_support)
                return;

        /* Clear all the interrupts which are set */
        ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
        XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

        /* Enable ECC interrupts */
        XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);

        XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}
static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
{
        unsigned int ecc_ier;

        ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

        /* Disable ECC DED interrupts */
        XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);

        XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}
static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
                                 enum xgbe_ecc_sec sec)
{
        unsigned int ecc_ier;

        ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

        /* Disable ECC SEC interrupt */
        switch (sec) {
        case XGBE_ECC_SEC_TX:
                XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
                break;
        case XGBE_ECC_SEC_RX:
                XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
                break;
        case XGBE_ECC_SEC_DESC:
                XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
                break;
        }

        XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}
static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
        unsigned int ss;

        switch (speed) {
        case SPEED_1000:
                ss = 0x03;
                break;
        case SPEED_2500:
                ss = 0x02;
                break;
        case SPEED_10000:
                ss = 0x00;
                break;
        default:
                return -EINVAL;
        }

        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
                XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

        return 0;
}
static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        /* Put the VLAN tag in the Rx descriptor */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

        /* Don't check the VLAN type */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

        /* Check only C-TAG (0x8100) packets */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

        /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

        /* Enable VLAN tag stripping */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

        return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

        return 0;
}
static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Enable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

        /* Enable VLAN Hash Table filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

        /* Disable VLAN tag inverse matching */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

        /* Only filter on the lower 12-bits of the VLAN tag */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

        /* In order for the VLAN Hash Table filtering to be effective,
         * the VLAN tag identifier in the VLAN Tag Register must not
         * be zero.  Set the VLAN tag identifier to "1" to enable the
         * VLAN Hash Table filtering.  This implies that a VLAN tag of
         * 1 will always pass filtering.
         */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

        return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Disable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

        return 0;
}
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
        u32 poly = 0xedb88320;  /* CRCPOLY_LE */
        u32 crc = ~0;
        u32 temp = 0;
        unsigned char *data = (unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        int i, bits;

        bits = get_bitmask_order(VLAN_VID_MASK);
        for (i = 0; i < bits; i++) {
                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;

                if (temp)
                        crc ^= poly;
        }

        return crc;
}
static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
        u32 crc;
        u16 vid;
        __le16 vid_le;
        u16 vlan_hash_table = 0;

        /* Generate the VLAN Hash Table value */
        for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
                /* Get the CRC32 value of the VLAN ID */
                vid_le = cpu_to_le16(vid);
                crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

                vlan_hash_table |= (1 << crc);
        }

        /* Set the VLAN Hash Table filtering register */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

        return 0;
}
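/* The shift of 28 above keeps only the top 4 bits of the reflected CRC,
 * yielding a bucket index of 0-15 that selects one bit of the 16-bit VLHT
 * field; all VIDs hashing to the same bucket share that filter bit.
 */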
static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
                                     unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

        /* Hardware will still perform VLAN filtering in promiscuous mode */
        if (enable) {
                xgbe_disable_rx_vlan_filtering(pdata);
        } else {
                if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        xgbe_enable_rx_vlan_filtering(pdata);
        }

        return 0;
}
static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
                                       unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

        return 0;
}
static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
                             struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;

        mac_addr_lo = 0;
        mac_addr_hi = 0;

        if (ha) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = ha->addr[0];
                mac_addr[1] = ha->addr[1];
                mac_addr[2] = ha->addr[2];
                mac_addr[3] = ha->addr[3];
                mac_addr = (u8 *)&mac_addr_hi;
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];

                netif_dbg(pdata, drv, pdata->netdev,
                          "adding mac address %pM at %#x\n",
                          ha->addr, *mac_reg);

                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }

        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
        *mac_reg += MAC_MACA_INC;
        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
        *mac_reg += MAC_MACA_INC;
}
static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mac_reg;
        unsigned int addn_macs;

        mac_reg = MAC_MACA1HR;
        addn_macs = pdata->hw_feat.addn_mac;

        if (netdev_uc_count(netdev) > addn_macs) {
                xgbe_set_promiscuous_mode(pdata, 1);
        } else {
                netdev_for_each_uc_addr(ha, netdev) {
                        xgbe_set_mac_reg(pdata, ha, &mac_reg);
                        addn_macs--;
                }

                if (netdev_mc_count(netdev) > addn_macs) {
                        xgbe_set_all_multicast_mode(pdata, 1);
                } else {
                        netdev_for_each_mc_addr(ha, netdev) {
                                xgbe_set_mac_reg(pdata, ha, &mac_reg);
                                addn_macs--;
                        }
                }
        }

        /* Clear remaining additional MAC address entries */
        while (addn_macs--)
                xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int hash_reg;
        unsigned int hash_table_shift, hash_table_count;
        u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
        u32 crc;
        unsigned int i;

        hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
        hash_table_count = pdata->hw_feat.hash_table_size / 32;
        memset(hash_table, 0, sizeof(hash_table));

        /* Build the MAC Hash Table register values */
        netdev_for_each_uc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        netdev_for_each_mc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        /* Set the MAC Hash Table registers */
        hash_reg = MAC_HTR0;
        for (i = 0; i < hash_table_count; i++) {
                XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
                hash_reg += MAC_HTR_INC;
        }
}
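/* Sizing example (illustrative): with hw_feat.hash_table_size = 256,
 * hash_table_shift = 26 - (256 >> 7) = 24, so crc >>= 24 leaves an 8-bit
 * index (0-255); crc >> 5 then picks one of 256 / 32 = 8 HTR registers and
 * crc & 0x1f picks the bit within it.
 */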
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
        if (pdata->hw_feat.hash_table_size)
                xgbe_set_mac_hash_table(pdata);
        else
                xgbe_set_mac_addn_addrs(pdata);

        return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
        unsigned int mac_addr_hi, mac_addr_lo;

        mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
        mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
                      (addr[1] <<  8) | (addr[0] <<  0);

        XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
        XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

        return 0;
}
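/* Byte-layout example: for addr = 00:11:22:33:44:55 the assembly above
 * produces MACA0HR = 0x5544 and MACA0LR = 0x33221100, i.e. the address is
 * stored little-endian across the two registers.
 */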
static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        unsigned int pr_mode, am_mode;

        pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
        am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

        xgbe_set_promiscuous_mode(pdata, pr_mode);
        xgbe_set_all_multicast_mode(pdata, am_mode);

        xgbe_add_mac_addresses(pdata);

        return 0;
}
static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
        unsigned int reg;

        if (gpio > 15)
                return -EINVAL;

        reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

        reg &= ~(1 << (gpio + 16));
        XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

        return 0;
}

static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
        unsigned int reg;

        if (gpio > 15)
                return -EINVAL;

        reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

        reg |= (1 << (gpio + 16));
        XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

        return 0;
}
static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
                                 int mmd_reg)
{
        unsigned long flags;
        unsigned int mmd_address, index, offset;
        int mmd_data;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 16-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 1 bit and reading 16 bits of data.
         */
        mmd_address <<= 1;
        index = mmd_address & ~pdata->xpcs_window_mask;
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        mmd_data = XPCS16_IOREAD(pdata, offset);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

        return mmd_data;
}
static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
                                   int mmd_reg, int mmd_data)
{
        unsigned long flags;
        unsigned int mmd_address, index, offset;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 16-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 1 bit and writing 16 bits of data.
         */
        mmd_address <<= 1;
        index = mmd_address & ~pdata->xpcs_window_mask;
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        XPCS16_IOWRITE(pdata, offset, mmd_data);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
                                 int mmd_reg)
{
        unsigned long flags;
        unsigned int mmd_address;
        int mmd_data;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and reading 32 bits of data.
         */
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
        mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

        return mmd_data;
}
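/* Address-mapping example: a clause-45 read of PCS (MMD 3) register 0x0010
 * gives mmd_address = (3 << 16) | 0x0010 = 0x30010, so 0x300 is written to
 * the window-select register and the data is read from mmio offset
 * (0x10 << 2) = 0x40.
 */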
static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
                                   int mmd_reg, int mmd_data)
{
        unsigned int mmd_address;
        unsigned long flags;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and writing 32 bits of data.
         */
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
        XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                              int mmd_reg)
{
        switch (pdata->vdata->xpcs_access) {
        case XGBE_XPCS_ACCESS_V1:
                return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);

        case XGBE_XPCS_ACCESS_V2:
        default:
                return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
        }
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                                int mmd_reg, int mmd_data)
{
        switch (pdata->vdata->xpcs_access) {
        case XGBE_XPCS_ACCESS_V1:
                return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);

        case XGBE_XPCS_ACCESS_V2:
        default:
                return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
        }
}
static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
                                   int reg, u16 val)
{
        unsigned int mdio_sca, mdio_sccd;

        reinit_completion(&pdata->mdio_complete);

        mdio_sca = 0;
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
        XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

        mdio_sccd = 0;
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
        XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

        if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
                netdev_err(pdata->netdev, "mdio write operation timed out\n");
                return -ETIMEDOUT;
        }

        return 0;
}
static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
                                  int reg)
{
        unsigned int mdio_sca, mdio_sccd;

        reinit_completion(&pdata->mdio_complete);

        mdio_sca = 0;
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
        XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

        mdio_sccd = 0;
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
        XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

        if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
                netdev_err(pdata->netdev, "mdio read operation timed out\n");
                return -ETIMEDOUT;
        }

        return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}
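/* Note the command encodings used in the two functions above: the MDIOSCCDR
 * CMD field is written as 1 for a write and 3 for a read, with BUSY set to
 * kick off the operation and the single-command-completion interrupt
 * signalling when it is done.
 */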
static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
                                 enum xgbe_mdio_mode mode)
{
        unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

        switch (mode) {
        case XGBE_MDIO_MODE_CL22:
                if (port > XGMAC_MAX_C22_PORT)
                        return -EINVAL;
                reg_val |= (1 << port);
                break;
        case XGBE_MDIO_MODE_CL45:
                break;
        default:
                return -EINVAL;
        }

        XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

        return 0;
}
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
        return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

        return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

        return 0;
}
static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;

        /* Reset the Tx descriptor
         *   Set buffer 1 (lo) address to zero
         *   Set buffer 1 (hi) address to zero
         *   Reset all other control bits (IC, TTSE, B2L & B1L)
         *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
         */
        rdesc->desc0 = 0;
        rdesc->desc1 = 0;
        rdesc->desc2 = 0;
        rdesc->desc3 = 0;

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        int i;
        int start_index = ring->cur;

        DBGPR("-->tx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Tx descriptor */
                xgbe_tx_desc_reset(rdata);
        }

        /* Update the total number of Tx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--tx_desc_init\n");
}
static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
                               struct xgbe_ring_data *rdata, unsigned int index)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;
        unsigned int rx_usecs = pdata->rx_usecs;
        unsigned int rx_frames = pdata->rx_frames;
        unsigned int inte;
        dma_addr_t hdr_dma, buf_dma;

        if (!rx_usecs && !rx_frames) {
                /* No coalescing, interrupt for every descriptor */
                inte = 1;
        } else {
                /* Set interrupt based on Rx frame coalescing setting */
                if (rx_frames && !((index + 1) % rx_frames))
                        inte = 1;
                else
                        inte = 0;
        }

        /* Reset the Rx descriptor
         *   Set buffer 1 (lo) address to header dma address (lo)
         *   Set buffer 1 (hi) address to header dma address (hi)
         *   Set buffer 2 (lo) address to buffer dma address (lo)
         *   Set buffer 2 (hi) address to buffer dma address (hi) and
         *     set control bits OWN and INTE
         */
        hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
        buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
        rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
        rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
        rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

        /* Since the Rx DMA engine is likely running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the descriptor
         */
        dma_wmb();

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        unsigned int start_index = ring->cur;
        unsigned int i;

        DBGPR("-->rx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Rx descriptor */
                xgbe_rx_desc_reset(pdata, rdata, i);
        }

        /* Update the total number of Rx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Update the Rx Descriptor Tail Pointer */
        rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--rx_desc_init\n");
}
static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
                                      unsigned int addend)
{
        unsigned int count = 10000;

        /* Set the addend register value and tell the device */
        XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

        /* Wait for addend update to complete */
        while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
                udelay(5);

        if (!count)
                netdev_err(pdata->netdev,
                           "timed out updating timestamp addend register\n");
}
static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
                                 unsigned int nsec)
{
        unsigned int count = 10000;

        /* Set the time values and tell the device */
        XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
        XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

        /* Wait for time update to complete */
        while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
                udelay(5);

        if (!count)
                netdev_err(pdata->netdev, "timed out initializing timestamp\n");
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
        u64 nsec;

        nsec = XGMAC_IOREAD(pdata, MAC_STSR);
        nsec *= NSEC_PER_SEC;
        nsec += XGMAC_IOREAD(pdata, MAC_STNR);

        return nsec;
}
static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
        unsigned int tx_snr, tx_ssr;
        u64 nsec;

        if (pdata->vdata->tx_tstamp_workaround) {
                tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
                tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
        } else {
                tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
                tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
        }

        if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
                return 0;

        nsec = tx_ssr;
        nsec *= NSEC_PER_SEC;
        nsec += tx_snr;

        return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
                               struct xgbe_ring_desc *rdesc)
{
        u64 nsec;

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
            !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
                nsec = le32_to_cpu(rdesc->desc1);
                nsec <<= 32;
                nsec |= le32_to_cpu(rdesc->desc0);
                if (nsec != 0xffffffffffffffffULL) {
                        packet->rx_tstamp = nsec;
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       RX_TSTAMP, 1);
                }
        }
}
static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
                              unsigned int mac_tscr)
{
        /* Set one nano-second accuracy */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

        /* Set fine timestamp update */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

        /* Overwrite earlier timestamps */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

        XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

        /* Exit if timestamping is not enabled */
        if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
                return 0;

        /* Initialize time registers */
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
        xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
        xgbe_set_tstamp_time(pdata, 0, 0);

        /* Initialize the timecounter */
        timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
                         ktime_to_ns(ktime_get_real()));

        return 0;
}
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
                               struct xgbe_ring *ring)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring_data *rdata;

        /* Make sure everything is written before the register write */
        wmb();

        /* Issue a poll command to Tx DMA by writing address
         * of next immediate free descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Start the Tx timer */
        if (pdata->tx_usecs && !channel->tx_timer_active) {
                channel->tx_timer_active = 1;
                mod_timer(&channel->tx_timer,
                          jiffies + usecs_to_jiffies(pdata->tx_usecs));
        }

        ring->tx.xmit_more = 0;
}
static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        unsigned int tx_packets, tx_bytes;
        unsigned int csum, tso, vlan, vxlan;
        unsigned int tso_context, vlan_context;
        unsigned int tx_set_ic;
        int start_index = ring->cur;
        int cur_index = ring->cur;
        int i;

        DBGPR("-->xgbe_dev_xmit\n");

        tx_packets = packet->tx_packets;
        tx_bytes = packet->tx_bytes;

        csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              CSUM_ENABLE);
        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);
        vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               VXLAN);

        if (tso && (packet->mss != ring->tx.cur_mss))
                tso_context = 1;
        else
                tso_context = 0;

        if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
                vlan_context = 1;
        else
                vlan_context = 0;

        /* Determine if an interrupt should be generated for this Tx:
         *   Interrupt:
         *     - Tx frame count exceeds the frame count setting
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set exceeds the frame count setting
         *   No interrupt:
         *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set does not exceed the frame count setting
         */
        ring->coalesce_count += tx_packets;
        if (!pdata->tx_frames)
                tx_set_ic = 0;
        else if (tx_packets > pdata->tx_frames)
                tx_set_ic = 1;
        else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
                tx_set_ic = 1;
        else
                tx_set_ic = 0;
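        /* Coalescing example (illustrative numbers): with tx_frames = 25 and
         * coalesce_count at 20, a 10-packet burst raises the count to 30;
         * 30 % 25 = 5 is less than 10, meaning a multiple of tx_frames was
         * just crossed, so tx_set_ic is set for this descriptor chain.
         */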
        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        rdesc = rdata->rdesc;

        /* Create a context descriptor if this is a TSO packet */
        if (tso_context || vlan_context) {
                if (tso_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "TSO context descriptor, mss=%u\n",
                                  packet->mss);

                        /* Set the MSS size */
                        XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
                                          MSS, packet->mss);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Indicate this descriptor contains the MSS */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          TCMSSV, 1);

                        ring->tx.cur_mss = packet->mss;
                }

                if (vlan_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "VLAN context descriptor, ctag=%u\n",
                                  packet->vlan_ctag);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Set the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VT, packet->vlan_ctag);

                        /* Indicate this descriptor contains the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VLTV, 1);

                        ring->tx.cur_vlan_ctag = packet->vlan_ctag;
                }

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;
        }

        /* Update buffer address (for TSO this is the header) */
        rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

        /* Update the buffer length */
        XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                          rdata->skb_dma_len);

        /* VLAN tag insertion check */
        if (vlan)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
                                  TX_NORMAL_DESC2_VLAN_INSERT);

        /* Timestamp enablement check */
        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

        /* Mark it as First Descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

        /* Mark it as a NORMAL descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

        /* Set OWN bit if not the first descriptor */
        if (cur_index != start_index)
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

        if (tso) {
                /* Enable TSO */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
                                  packet->tcp_payload_len);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
                                  packet->tcp_header_len / 4);

                pdata->ext_stats.tx_tso_packets += tx_packets;
        } else {
                /* Enable CRC and Pad Insertion */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

                /* Enable HW CSUM */
                if (csum)
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
                                          CIC, 0x3);

                /* Set the total length to be transmitted */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
                                  packet->length);
        }

        if (vxlan) {
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
                                  TX_NORMAL_DESC3_VXLAN_PACKET);

                pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
        }

        for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;

                /* Update buffer address */
                rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
                rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

                /* Update the buffer length */
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                                  rdata->skb_dma_len);

                /* Set OWN bit */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

                /* Mark it as NORMAL descriptor */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

                /* Enable HW CSUM */
                if (csum)
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
                                          CIC, 0x3);
        }

        /* Set LAST bit for the last descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

        /* Set IC bit based on Tx coalescing settings */
        if (tx_set_ic)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

        /* Save the Tx info to report back during cleanup */
        rdata->tx.packets = tx_packets;
        rdata->tx.bytes = tx_bytes;

        pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
        pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;

        /* In case the Tx DMA engine is running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the first descriptor
         */
        dma_wmb();

        /* Set OWN bit for the first descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        rdesc = rdata->rdesc;
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

        if (netif_msg_tx_queued(pdata))
                xgbe_dump_tx_desc(pdata, ring, start_index,
                                  packet->rdesc_count, 1);

        /* Make sure ownership is written to the descriptor */
        smp_wmb();

        ring->cur = cur_index + 1;
        if (!packet->skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
                                                   channel->queue_index)))
                xgbe_tx_start_xmit(channel, ring);
        else
                ring->tx.xmit_more = 1;

        DBGPR("  %s: descriptors %u to %u written\n",
              channel->name, start_index & (ring->rdesc_count - 1),
              (ring->cur - 1) & (ring->rdesc_count - 1));

        DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        struct net_device *netdev = pdata->netdev;
        unsigned int err, etlt, l34t;

        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;

        /* Check for data availability */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
                return 1;

        /* Make sure descriptor fields are read after reading the OWN bit */
        dma_rmb();

        if (netif_msg_rx_status(pdata))
                xgbe_dump_rx_desc(pdata, ring, ring->cur);

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
                /* Timestamp Context Descriptor */
                xgbe_get_rx_tstamp(packet, rdesc);

                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT, 1);
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT_NEXT, 0);
                return 0;
        }

        /* Normal Descriptor, be sure Context Descriptor bit is off */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

        /* Indicate if a Context Descriptor is next */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT_NEXT, 1);

        /* Get the header length */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               FIRST, 1);
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                                                      RX_NORMAL_DESC2, HL);
                if (rdata->rx.hdr_len)
                        pdata->ext_stats.rx_split_header_packets++;
        } else {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               FIRST, 0);
        }

        /* Get the RSS hash */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               RSS_HASH, 1);

                packet->rss_hash = le32_to_cpu(rdesc->desc1);

                l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
                switch (l34t) {
                case RX_DESC3_L34T_IPV4_TCP:
                case RX_DESC3_L34T_IPV4_UDP:
                case RX_DESC3_L34T_IPV6_TCP:
                case RX_DESC3_L34T_IPV6_UDP:
                        packet->rss_hash_type = PKT_HASH_TYPE_L4;
                        break;
                default:
                        packet->rss_hash_type = PKT_HASH_TYPE_L3;
                }
        }

        /* Not all the data has been transferred for this packet */
        if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
                return 0;

        /* This is the last of the data for this packet */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                       LAST, 1);

        /* Get the packet length */
        rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

        /* Set checksum done indicator as appropriate */
        if (netdev->features & NETIF_F_RXCSUM) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CSUM_DONE, 1);
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               TNPCSUM_DONE, 1);
        }

        /* Set the tunneled packet indicator */
        if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               TNP, 1);
                pdata->ext_stats.rx_vxlan_packets++;

                l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
                switch (l34t) {
                case RX_DESC3_L34T_IPV4_UNKNOWN:
                case RX_DESC3_L34T_IPV6_UNKNOWN:
                        XGMAC_SET_BITS(packet->attributes,
                                       RX_PACKET_ATTRIBUTES,
                                       TNPCSUM_DONE, 0);
                        break;
                }
        }

        /* Check for errors (only valid in last descriptor) */
        err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
        etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
        netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

        if (!err || !etlt) {
                /* No error if err is 0 or etlt is 0 */
                if ((etlt == 0x09) &&
                    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                        XGMAC_SET_BITS(packet->attributes,
                                       RX_PACKET_ATTRIBUTES,
                                       VLAN_CTAG, 1);
                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
                                                              RX_NORMAL_DESC0,
                                                              OVT);
                        netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
                                  packet->vlan_ctag);
                }
        } else {
                unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
                                                  RX_PACKET_ATTRIBUTES, TNP);

                if ((etlt == 0x05) || (etlt == 0x06)) {
                        XGMAC_SET_BITS(packet->attributes,
                                       RX_PACKET_ATTRIBUTES,
                                       CSUM_DONE, 0);
                        XGMAC_SET_BITS(packet->attributes,
                                       RX_PACKET_ATTRIBUTES,
                                       TNPCSUM_DONE, 0);
                        pdata->ext_stats.rx_csum_errors++;
                } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
                        XGMAC_SET_BITS(packet->attributes,
                                       RX_PACKET_ATTRIBUTES,
                                       CSUM_DONE, 0);
                        XGMAC_SET_BITS(packet->attributes,
                                       RX_PACKET_ATTRIBUTES,
                                       TNPCSUM_DONE, 0);
                        pdata->ext_stats.rx_vxlan_csum_errors++;
                } else {
                        XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
                                       FRAME, 1);
                }
        }

        pdata->ext_stats.rxq_packets[channel->queue_index]++;
        pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;

        DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
              ring->cur & (ring->rdesc_count - 1), ring->cur);

        return 0;
}
static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
        /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
        /* Rx and Tx share LD bit, so check TDES3.LD bit */
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}
static int xgbe_enable_int(struct xgbe_channel *channel,
                           enum xgbe_int int_id)
{
        switch (int_id) {
        case XGMAC_INT_DMA_CH_SR_TI:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TI_RI:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
                break;
        case XGMAC_INT_DMA_ALL:
                channel->curr_ier |= channel->saved_ier;
                break;
        default:
                return -1;
        }

        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

        return 0;
}
static int xgbe_disable_int(struct xgbe_channel *channel,
                            enum xgbe_int int_id)
{
        switch (int_id) {
        case XGMAC_INT_DMA_CH_SR_TI:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TI_RI:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
                break;
        case XGMAC_INT_DMA_ALL:
                channel->saved_ier = channel->curr_ier;
                channel->curr_ier = 0;
                break;
        default:
                return -1;
        }

        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

        return 0;
}
static int __xgbe_exit(struct xgbe_prv_data *pdata)
{
        unsigned int count = 2000;

        DBGPR("-->xgbe_exit\n");

        /* Issue a software reset */
        XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
        usleep_range(10, 15);

        /* Poll Until Poll Condition */
        while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
                usleep_range(500, 600);

        if (!count)
                return -EBUSY;

        DBGPR("<--xgbe_exit\n");

        return 0;
}

static int xgbe_exit(struct xgbe_prv_data *pdata)
{
        int ret;

        /* To guard against possible incorrectly generated interrupts,
         * issue the software reset twice.
         */
        ret = __xgbe_exit(pdata);
        if (ret)
                return ret;

        return __xgbe_exit(pdata);
}
static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
        unsigned int i, count;

        if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
                return 0;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

        /* Poll Until Poll Condition */
        for (i = 0; i < pdata->tx_q_count; i++) {
                count = 2000;
                while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
                                                        MTL_Q_TQOMR, FTQ))
                        usleep_range(500, 600);

                if (!count)
                        return -EBUSY;
        }

        return 0;
}
static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	unsigned int sbmr;

	sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);

	/* Set enhanced addressing mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);

	XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);

	/* Set descriptor fetching threshold */
	if (pdata->vdata->tx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
				   pdata->vdata->tx_desc_prefetch);

	if (pdata->vdata->rx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
				   pdata->vdata->rx_desc_prefetch);
}

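/* The (blen >> 2) encoding above suggests the BLEN field is one-hot with
 * its least significant bit selecting a 4-beat burst, so a power-of-two
 * burst length of 2^n beats encodes as 1 << (n - 2); e.g. blen = 256
 * writes 0x40. This is an inference from the shift, not from the
 * databook.
 */
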
static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				       MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
					      unsigned int queue,
					      unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));

	if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
		/* PFC is active for this queue */
		rfa = pdata->pfc_rfa;
		rfd = rfa + frame_fifo_size;
		if (rfd > XGMAC_FLOW_CONTROL_MAX)
			rfd = XGMAC_FLOW_CONTROL_MAX;
		if (rfa >= XGMAC_FLOW_CONTROL_MAX)
			rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
		goto set_threshold;
	}

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* Set rx_rfa/rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames:
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames: trigger when just over
		 * 2 frames of space are available.
		 */
		rfa = frame_fifo_size * 2;
		rfa += XGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

set_threshold:
	pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
}

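/* Worked example (a sketch; it assumes XGMAC_FLOW_CONTROL_UNIT is 512
 * bytes and XGMAC_FLOW_CONTROL_ALIGN() rounds up to that unit): with a
 * 9,018-byte max frame, frame_fifo_size aligns to 9,216. A 32,768-byte
 * queue fifo exceeds 3 * 9,216 = 27,648, so the "above 3 max-frames"
 * branch runs: rfa = 2 * 9,216 + 512 = 18,944 and rfd = 18,944 + 9,216 =
 * 28,160 bytes, which XGMAC_FLOW_CONTROL_VALUE() then converts into
 * register units.
 */
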
static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
						  unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;

		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
				       pdata->rx_rfa[i]);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
				       pdata->rx_rfd[i]);
	}
}

static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return min_t(unsigned int, pdata->tx_max_fifo_size,
		     pdata->hw_feat.tx_fifo_size);
}

static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return min_t(unsigned int, pdata->rx_max_fifo_size,
		     pdata->hw_feat.rx_fifo_size);
}

static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
				      unsigned int queue_count,
				      unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;
	unsigned int i;

	q_fifo_size = fifo_size / queue_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result by 1).
	 */
	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	/* Distribute the fifo equally amongst the queues */
	for (i = 0; i < queue_count; i++)
		fifo[i] = p_fifo;
}

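/* Worked example (assuming XGMAC_FIFO_UNIT is 256 bytes): a 65,536-byte
 * fifo split across 4 queues gives q_fifo_size = 16,384, so p_fifo =
 * 16,384 / 256 = 64, stored as 63 because the register encoding is
 * zero-based (a value of n means n + 1 allocation units).
 */
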
static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
					   unsigned int queue_count,
					   unsigned int *fifo)
{
	unsigned int i;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);

	if (queue_count <= IEEE_8021QAZ_MAX_TCS)
		return fifo_size;

	/* Rx queues 9 and up are for specialized packets,
	 * such as PTP or DCB control packets, etc. and
	 * don't require a large fifo
	 */
	for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
		fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
		fifo_size -= XGMAC_FIFO_MIN_ALLOC;
	}

	return fifo_size;
}

static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
{
	unsigned int delay;

	/* If a delay has been provided, use that */
	if (pdata->pfc->delay)
		return pdata->pfc->delay / 8;

	/* Allow for two maximum size frames */
	delay = xgbe_get_max_frame(pdata);
	delay += XGMAC_ETH_PREAMBLE;
	delay *= 2;

	/* Allow for PFC frame */
	delay += XGMAC_PFC_DATA_LEN;
	delay += ETH_HLEN + ETH_FCS_LEN;
	delay += XGMAC_ETH_PREAMBLE;

	/* Allow for miscellaneous delays (LPI exit, cable, etc.) */
	delay += XGMAC_PFC_DELAYS;

	return delay;
}

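/* The math above, spelled out: an ieee_pfc delay is specified in bit
 * times, so a caller-provided delay is divided by 8 to get bytes.
 * Otherwise the worst case is modeled as two full frames (plus preamble)
 * already in flight, plus one PFC pause frame (data, header, FCS and
 * preamble), plus a fixed allowance for LPI exit and cable latency, all
 * expressed in bytes.
 */
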
static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
{
	unsigned int count, prio_queues;
	unsigned int i;

	if (!pdata->pfc->pfc_en)
		return 0;

	count = 0;
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	for (i = 0; i < prio_queues; i++) {
		if (!xgbe_is_pfc_queue(pdata, i))
			continue;

		pdata->pfcq[i] = 1;
		count++;
	}

	return count;
}

static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
				    unsigned int fifo_size,
				    unsigned int *fifo)
{
	unsigned int q_fifo_size, rem_fifo, addn_fifo;
	unsigned int prio_queues;
	unsigned int pfc_count;
	unsigned int i;

	q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	pfc_count = xgbe_get_pfc_queues(pdata);

	if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
		/* No traffic classes with PFC enabled or can't do lossless */
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
		return;
	}

	/* Calculate how much fifo we have to play with */
	rem_fifo = fifo_size - (q_fifo_size * prio_queues);

	/* Calculate how much more than base fifo PFC needs, which also
	 * becomes the threshold activation point (RFA)
	 */
	pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
	pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);

	if (pdata->pfc_rfa > q_fifo_size) {
		addn_fifo = pdata->pfc_rfa - q_fifo_size;
		addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
	} else {
		addn_fifo = 0;
	}

	/* Calculate DCB fifo settings:
	 *   - distribute remaining fifo between the VLAN priority
	 *     queues based on traffic class PFC enablement and overall
	 *     priority (0 is lowest priority, so start at highest)
	 */
	i = prio_queues;
	while (i > 0) {
		i--;

		fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;

		if (!pdata->pfcq[i] || !addn_fifo)
			continue;

		if (addn_fifo > rem_fifo) {
			netdev_warn(pdata->netdev,
				    "RXq%u cannot set needed fifo size\n", i);
			if (!rem_fifo)
				continue;

			addn_fifo = rem_fifo;
		}

		fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
		rem_fifo -= addn_fifo;
	}

	if (rem_fifo) {
		unsigned int inc_fifo = rem_fifo / prio_queues;

		/* Distribute remaining fifo across queues */
		for (i = 0; i < prio_queues; i++)
			fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
	}
}

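/* Distribution sketch, following the logic above: every priority queue
 * first gets the base max-frame-sized allocation; queues with PFC
 * enabled then claim addn_fifo extra bytes out of rem_fifo, highest
 * priority first; whatever is still left afterwards is spread evenly.
 * So with 4 queues, PFC on queue 3 only, and sufficient rem_fifo,
 * queue 3 ends up one addn_fifo larger than queues 0-2 plus its even
 * share of the remainder.
 */
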
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int i;

	fifo_size = xgbe_get_tx_fifo_size(pdata);

	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int prio_queues;
	unsigned int i;

	/* Clear any DCB related fifo/queue information */
	memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
	pdata->pfc_rfa = 0;

	fifo_size = xgbe_get_rx_fifo_size(pdata);
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);

	/* Assign a minimum fifo to the non-VLAN priority queues */
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

	if (pdata->pfc && pdata->ets)
		xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
	else
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);

	xgbe_calculate_flow_control_threshold(pdata, fifo);
	xgbe_config_flow_control_threshold(pdata);

	if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
		netif_info(pdata, drv, pdata->netdev,
			   "%u Rx hardware queues\n", pdata->rx_q_count);
		for (i = 0; i < pdata->rx_q_count; i++)
			netif_info(pdata, drv, pdata->netdev,
				   "RxQ%u, %u byte fifo queue\n", i,
				   ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
	} else {
		netif_info(pdata, drv, pdata->netdev,
			   "%u Rx hardware queues, %u byte fifo per queue\n",
			   pdata->rx_q_count,
			   ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
	}
}

static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 *   Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}

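/* Register packing example (assuming MAC_RQC2_Q_PER_REG is 4, i.e. one
 * byte of priority mask per Rx queue per register): with prio_queues = 4
 * and ppq = 2, queue 0 takes priorities 0-1 (mask 0x03), queue 1 takes
 * 2-3 (0x0c), queue 2 takes 4-5 (0x30) and queue 3 takes 6-7 (0xc0),
 * so MAC_RQC2R is written as 0xc0300c03 in a single pass.
 */
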
static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
	unsigned int offset, queue, prio;
	u8 i;

	netdev_reset_tc(pdata->netdev);
	if (!pdata->num_tcs)
		return;

	netdev_set_num_tc(pdata->netdev, pdata->num_tcs);

	for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
		while ((queue < pdata->tx_q_count) &&
		       (pdata->q2tc_map[queue] == i))
			queue++;

		netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
			  i, offset, queue - 1);
		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
		offset = queue;
	}

	if (!pdata->ets)
		return;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		netdev_set_prio_tc_map(pdata->netdev, prio,
				       pdata->ets->prio_tc[prio]);
}

static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
	struct ieee_ets *ets = pdata->ets;
	unsigned int total_weight, min_weight, weight;
	unsigned int mask, reg, reg_val;
	unsigned int i, prio;

	if (!ets)
		return;

	/* Set Tx to deficit weighted round robin scheduling algorithm (when
	 * traffic class is using ETS algorithm)
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

	/* Set Traffic Class algorithms */
	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
	min_weight = total_weight / 100;
	if (!min_weight)
		min_weight = 1;

	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		/* Map the priorities to the traffic class */
		mask = 0;
		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
			if (ets->prio_tc[prio] == i)
				mask |= (1 << prio);
		}

		netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
			  i, mask);
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
		reg_val = XGMAC_IOREAD(pdata, reg);

		reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
		reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));

		XGMAC_IOWRITE(pdata, reg, reg_val);

		/* Set the traffic class algorithm */
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using SP\n", i);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_SP);
			break;
		case IEEE_8021QAZ_TSA_ETS:
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = clamp(weight, min_weight, total_weight);

			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using DWRR (weight %u)\n", i, weight);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_ETS);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
					       weight);
			break;
		}
	}

	xgbe_config_tc(pdata);
}

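/* Weight example, using only values visible above: with an MTU of 1,500
 * and 8 traffic classes, total_weight = 12,000 and min_weight = 120; an
 * ETS class assigned 25% of bandwidth gets weight = 12,000 * 25 / 100 =
 * 3,000, clamped to the [120, 12000] range before being written to
 * MTL_TC_QWR.
 */
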
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
	if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
		/* Just stop the Tx queues while the Rx fifo is changed */
		netif_tx_stop_all_queues(pdata->netdev);

		/* Suspend Rx so that the fifos can be adjusted */
		pdata->hw_if.disable_rx(pdata);
	}

	xgbe_config_rx_fifo_size(pdata);
	xgbe_config_flow_control(pdata);

	if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
		/* Resume Rx */
		pdata->hw_if.enable_rx(pdata);

		/* Resume Tx queues */
		netif_tx_start_all_queues(pdata->netdev);
	}
}

static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}

static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	xgbe_set_speed(pdata, pdata->phy_speed);
}

static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if (pdata->netdev->features & NETIF_F_RXCSUM)
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xgbe_enable_rx_vlan_filtering(pdata);
	else
		xgbe_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xgbe_enable_rx_vlan_stripping(pdata);
	else
		xgbe_disable_rx_vlan_stripping(pdata);
}

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	if (pdata->vdata->mmc_64bit) {
		switch (reg_lo) {
		/* These registers are always 32 bit */
		case MMC_RXRUNTERROR:
		case MMC_RXJABBERERROR:
		case MMC_RXUNDERSIZE_G:
		case MMC_RXOVERSIZE_G:
		case MMC_RXWATCHDOGERROR:
			read_hi = false;
			break;

		default:
			read_hi = true;
		}
	} else {
		switch (reg_lo) {
		/* These registers are always 64 bit */
		case MMC_TXOCTETCOUNT_GB_LO:
		case MMC_TXOCTETCOUNT_G_LO:
		case MMC_RXOCTETCOUNT_GB_LO:
		case MMC_RXOCTETCOUNT_G_LO:
			read_hi = true;
			break;

		default:
			read_hi = false;
		}
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}

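/* Read layout, as implied above: a 64-bit counter exposes its low 32
 * bits at reg_lo and its high 32 bits at reg_lo + 4, combined here as
 * (hi << 32) | lo. The error counters listed as always-32-bit have no
 * high word, so the second read is skipped for them.
 */
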
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
				     unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
	 * wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
}

static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
				 unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return xgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state.  Don't wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}

static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
				       MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
				 unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, rx_timeout)) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, rx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}

static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

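/* MAC_RQC0R packing example: each Rx queue owns a 2-bit field, and the
 * 0x02 written per queue appears to select the "enabled" encoding for
 * that field, so with 4 queues the loop builds 0x02 | 0x08 | 0x20 |
 * 0x80 = 0xaa. Writing 0 to the same register in xgbe_disable_rx()
 * below disables every queue at once.
 */
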
static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}
}

static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

static int xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_init\n");

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret) {
		netdev_err(pdata->netdev, "error flushing TX queues\n");
		return ret;
	}

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	/* TODO: Error Packet and undersized good Packet forwarding enable
	 * (FEP and FUP)
	 */
	xgbe_config_dcb_tc(pdata);
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	/*
	 * Initialize ECC related features
	 */
	xgbe_enable_ecc_interrupts(pdata);

	DBGPR("<--xgbe_init\n");

	return 0;
}

void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_speed = xgbe_set_speed;

	hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;

	hw_if->set_gpio = xgbe_set_gpio;
	hw_if->clr_gpio = xgbe_clr_gpio;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_xmit = xgbe_dev_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;
	hw_if->tx_start_xmit = xgbe_tx_start_xmit;

	/* For flow control */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For PTP config */
	hw_if->config_tstamp = xgbe_config_tstamp;
	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

	/* For Data Center Bridging config */
	hw_if->config_tc = xgbe_config_tc;
	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

	/* For ECC */
	hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
	hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;

	/* For VXLAN */
	hw_if->enable_vxlan = xgbe_enable_vxlan;
	hw_if->disable_vxlan = xgbe_disable_vxlan;
	hw_if->set_vxlan_id = xgbe_set_vxlan_id;

	DBGPR("<--xgbe_init_function_ptrs\n");
}