// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "dwxgmac2.h"
13 static void dwxgmac2_core_init(struct mac_device_info *hw,
14 struct net_device *dev)
16 void __iomem *ioaddr = hw->pcsr;
20 tx = readl(ioaddr + XGMAC_TX_CONFIG);
21 rx = readl(ioaddr + XGMAC_RX_CONFIG);
23 tx |= XGMAC_CORE_INIT_TX;
24 rx |= XGMAC_CORE_INIT_RX;
27 rx |= XGMAC_CONFIG_GPSLCE;
28 rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT;
29 rx |= XGMAC_CONFIG_WD;
30 } else if (mtu > 2000) {
31 rx |= XGMAC_CONFIG_JE;
32 } else if (mtu > 1500) {
33 rx |= XGMAC_CONFIG_S2KP;
37 tx |= XGMAC_CONFIG_TE;
38 tx &= ~hw->link.speed_mask;
42 tx |= hw->link.xgmii.speed10000;
45 tx |= hw->link.speed2500;
49 tx |= hw->link.speed1000;
54 writel(tx, ioaddr + XGMAC_TX_CONFIG);
55 writel(rx, ioaddr + XGMAC_RX_CONFIG);
56 writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
59 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
61 u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
62 u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
65 tx |= XGMAC_CONFIG_TE;
66 rx |= XGMAC_CONFIG_RE;
68 tx &= ~XGMAC_CONFIG_TE;
69 rx &= ~XGMAC_CONFIG_RE;
72 writel(tx, ioaddr + XGMAC_TX_CONFIG);
73 writel(rx, ioaddr + XGMAC_RX_CONFIG);
76 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
78 void __iomem *ioaddr = hw->pcsr;
81 value = readl(ioaddr + XGMAC_RX_CONFIG);
83 value |= XGMAC_CONFIG_IPC;
85 value &= ~XGMAC_CONFIG_IPC;
86 writel(value, ioaddr + XGMAC_RX_CONFIG);
88 return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
91 static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
94 void __iomem *ioaddr = hw->pcsr;
97 value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
98 if (mode == MTL_QUEUE_AVB)
99 value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
100 else if (mode == MTL_QUEUE_DCB)
101 value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
102 writel(value, ioaddr + XGMAC_RXQ_CTRL0);
105 static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
108 void __iomem *ioaddr = hw->pcsr;
111 reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
115 value = readl(ioaddr + reg);
116 value &= ~XGMAC_PSRQ(queue);
117 value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
119 writel(value, ioaddr + reg);
122 static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
125 void __iomem *ioaddr = hw->pcsr;
128 reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
132 value = readl(ioaddr + reg);
133 value &= ~XGMAC_PSTC(queue);
134 value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
136 writel(value, ioaddr + reg);
139 static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
142 void __iomem *ioaddr = hw->pcsr;
145 value = readl(ioaddr + XGMAC_MTL_OPMODE);
149 case MTL_RX_ALGORITHM_SP:
151 case MTL_RX_ALGORITHM_WSP:
158 writel(value, ioaddr + XGMAC_MTL_OPMODE);
161 static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
164 void __iomem *ioaddr = hw->pcsr;
169 value = readl(ioaddr + XGMAC_MTL_OPMODE);
170 value &= ~XGMAC_ETSALG;
173 case MTL_TX_ALGORITHM_WRR:
176 case MTL_TX_ALGORITHM_WFQ:
179 case MTL_TX_ALGORITHM_DWRR:
187 writel(value, ioaddr + XGMAC_MTL_OPMODE);
189 /* Set ETS if desired */
190 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
191 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
195 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
199 static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
200 u32 weight, u32 queue)
202 void __iomem *ioaddr = hw->pcsr;
204 writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
207 static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
210 void __iomem *ioaddr = hw->pcsr;
213 reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
217 value = readl(ioaddr + reg);
218 value &= ~XGMAC_QxMDMACH(queue);
219 value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
221 writel(value, ioaddr + reg);
224 static void dwxgmac2_config_cbs(struct mac_device_info *hw,
225 u32 send_slope, u32 idle_slope,
226 u32 high_credit, u32 low_credit, u32 queue)
228 void __iomem *ioaddr = hw->pcsr;
231 writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
232 writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
233 writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
234 writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
236 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
237 value |= XGMAC_CC | XGMAC_CBS;
238 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
241 static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
242 struct stmmac_extra_stats *x)
244 void __iomem *ioaddr = hw->pcsr;
247 en = readl(ioaddr + XGMAC_INT_EN);
248 stat = readl(ioaddr + XGMAC_INT_STATUS);
252 if (stat & XGMAC_PMTIS) {
253 x->irq_receive_pmt_irq_n++;
254 readl(ioaddr + XGMAC_PMT);
260 static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
262 void __iomem *ioaddr = hw->pcsr;
266 status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
267 if (status & BIT(chan)) {
268 u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
270 if (chan_status & XGMAC_RXOVFIS)
271 ret |= CORE_IRQ_MTL_RX_OVERFLOW;
273 writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
279 static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
280 unsigned int fc, unsigned int pause_time,
283 void __iomem *ioaddr = hw->pcsr;
287 writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
289 for (i = 0; i < tx_cnt; i++) {
290 u32 value = XGMAC_TFE;
293 value |= pause_time << XGMAC_PT_SHIFT;
295 writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
300 static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
302 void __iomem *ioaddr = hw->pcsr;
305 if (mode & WAKE_MAGIC)
306 val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
307 if (mode & WAKE_UCAST)
308 val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
310 u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
311 cfg |= XGMAC_CONFIG_RE;
312 writel(cfg, ioaddr + XGMAC_RX_CONFIG);
315 writel(val, ioaddr + XGMAC_PMT);
318 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
319 unsigned char *addr, unsigned int reg_n)
321 void __iomem *ioaddr = hw->pcsr;
324 value = (addr[5] << 8) | addr[4];
325 writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
327 value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
328 writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
331 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
332 unsigned char *addr, unsigned int reg_n)
334 void __iomem *ioaddr = hw->pcsr;
335 u32 hi_addr, lo_addr;
337 /* Read the MAC address from the hardware */
338 hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
339 lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
341 /* Extract the MAC address from the high and low words */
342 addr[0] = lo_addr & 0xff;
343 addr[1] = (lo_addr >> 8) & 0xff;
344 addr[2] = (lo_addr >> 16) & 0xff;
345 addr[3] = (lo_addr >> 24) & 0xff;
346 addr[4] = hi_addr & 0xff;
347 addr[5] = (hi_addr >> 8) & 0xff;
350 static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
353 int numhashregs, regs;
355 switch (mcbitslog2) {
369 for (regs = 0; regs < numhashregs; regs++)
370 writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
373 static void dwxgmac2_set_filter(struct mac_device_info *hw,
374 struct net_device *dev)
376 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
377 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
378 int mcbitslog2 = hw->mcast_bits_log2;
382 value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
383 value |= XGMAC_FILTER_HPF;
385 memset(mc_filter, 0, sizeof(mc_filter));
387 if (dev->flags & IFF_PROMISC) {
388 value |= XGMAC_FILTER_PR;
389 value |= XGMAC_FILTER_PCF;
390 } else if ((dev->flags & IFF_ALLMULTI) ||
391 (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
392 value |= XGMAC_FILTER_PM;
394 for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
395 writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
396 } else if (!netdev_mc_empty(dev)) {
397 struct netdev_hw_addr *ha;
399 value |= XGMAC_FILTER_HMC;
401 netdev_for_each_mc_addr(ha, dev) {
402 int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
404 mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
408 dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
410 /* Handle multiple unicast addresses */
411 if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
412 value |= XGMAC_FILTER_PR;
414 struct netdev_hw_addr *ha;
417 netdev_for_each_uc_addr(ha, dev) {
418 dwxgmac2_set_umac_addr(hw, ha->addr, reg);
422 for ( ; reg < XGMAC_ADDR_MAX; reg++) {
423 writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
424 writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
428 writel(value, ioaddr + XGMAC_PACKET_FILTER);
431 static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
433 u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
436 value |= XGMAC_CONFIG_LM;
438 value &= ~XGMAC_CONFIG_LM;
440 writel(value, ioaddr + XGMAC_RX_CONFIG);
443 static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
448 writel(val, ioaddr + XGMAC_RSS_DATA);
449 ctrl |= idx << XGMAC_RSSIA_SHIFT;
450 ctrl |= is_key ? XGMAC_ADDRT : 0x0;
452 writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
454 return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
455 !(ctrl & XGMAC_OB), 100, 10000);
458 static int dwxgmac2_rss_configure(struct mac_device_info *hw,
459 struct stmmac_rss *cfg, u32 num_rxq)
461 void __iomem *ioaddr = hw->pcsr;
462 u32 *key = (u32 *)cfg->key;
466 value = readl(ioaddr + XGMAC_RSS_CTRL);
468 value &= ~XGMAC_RSSE;
469 writel(value, ioaddr + XGMAC_RSS_CTRL);
473 for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
474 ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
479 for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
480 ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
485 for (i = 0; i < num_rxq; i++)
486 dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
488 value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
489 writel(value, ioaddr + XGMAC_RSS_CTRL);
493 static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
496 void __iomem *ioaddr = hw->pcsr;
498 writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
501 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
503 value |= XGMAC_FILTER_VTFE;
505 writel(value, ioaddr + XGMAC_PACKET_FILTER);
507 value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
509 value |= XGMAC_VLAN_EDVLP;
510 value |= XGMAC_VLAN_ESVL;
511 value |= XGMAC_VLAN_DOVLTC;
514 writel(value, ioaddr + XGMAC_VLAN_TAG);
516 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
518 value &= ~XGMAC_FILTER_VTFE;
520 writel(value, ioaddr + XGMAC_PACKET_FILTER);
522 value = readl(ioaddr + XGMAC_VLAN_TAG);
524 value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
525 value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
526 value &= ~XGMAC_VLAN_DOVLTC;
527 value &= ~XGMAC_VLAN_VID;
529 writel(value, ioaddr + XGMAC_VLAN_TAG);
533 struct dwxgmac3_error_desc {
536 const char *detailed_desc;
539 #define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
541 static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
542 const char *module_name,
543 const struct dwxgmac3_error_desc *desc,
544 unsigned long field_offset,
545 struct stmmac_safety_stats *stats)
547 unsigned long loc, mask;
548 u8 *bptr = (u8 *)stats;
551 ptr = (unsigned long *)(bptr + field_offset);
554 for_each_set_bit(loc, &mask, 32) {
555 netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
556 "correctable" : "uncorrectable", module_name,
557 desc[loc].desc, desc[loc].detailed_desc);
559 /* Update counters */
564 static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
565 { true, "ATPES", "Application Transmit Interface Parity Check Error" },
566 { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
567 { true, "TPES", "TSO Data Path Parity Check Error" },
568 { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
569 { true, "MTPES", "MTL Data Path Parity Check Error" },
570 { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
571 { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
572 { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
573 { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
574 { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
575 { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
576 { true, "CWPES", "CSR Write Data Path Parity Check Error" },
577 { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
578 { true, "TTES", "TX FSM Timeout Error" },
579 { true, "RTES", "RX FSM Timeout Error" },
580 { true, "CTES", "CSR FSM Timeout Error" },
581 { true, "ATES", "APP FSM Timeout Error" },
582 { true, "PTES", "PTP FSM Timeout Error" },
583 { false, "UNKNOWN", "Unknown Error" }, /* 18 */
584 { false, "UNKNOWN", "Unknown Error" }, /* 19 */
585 { false, "UNKNOWN", "Unknown Error" }, /* 20 */
586 { true, "MSTTES", "Master Read/Write Timeout Error" },
587 { true, "SLVTES", "Slave Read/Write Timeout Error" },
588 { true, "ATITES", "Application Timeout on ATI Interface Error" },
589 { true, "ARITES", "Application Timeout on ARI Interface Error" },
590 { true, "FSMPES", "FSM State Parity Error" },
591 { false, "UNKNOWN", "Unknown Error" }, /* 26 */
592 { false, "UNKNOWN", "Unknown Error" }, /* 27 */
593 { false, "UNKNOWN", "Unknown Error" }, /* 28 */
594 { false, "UNKNOWN", "Unknown Error" }, /* 29 */
595 { false, "UNKNOWN", "Unknown Error" }, /* 30 */
596 { true, "CPI", "Control Register Parity Check Error" },
599 static void dwxgmac3_handle_mac_err(struct net_device *ndev,
600 void __iomem *ioaddr, bool correctable,
601 struct stmmac_safety_stats *stats)
605 value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
606 writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
608 dwxgmac3_log_error(ndev, value, correctable, "MAC",
609 dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
612 static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
613 { true, "TXCES", "MTL TX Memory Error" },
614 { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
615 { true, "TXUES", "MTL TX Memory Error" },
616 { false, "UNKNOWN", "Unknown Error" }, /* 3 */
617 { true, "RXCES", "MTL RX Memory Error" },
618 { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
619 { true, "RXUES", "MTL RX Memory Error" },
620 { false, "UNKNOWN", "Unknown Error" }, /* 7 */
621 { true, "ECES", "MTL EST Memory Error" },
622 { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
623 { true, "EUES", "MTL EST Memory Error" },
624 { false, "UNKNOWN", "Unknown Error" }, /* 11 */
625 { true, "RPCES", "MTL RX Parser Memory Error" },
626 { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
627 { true, "RPUES", "MTL RX Parser Memory Error" },
628 { false, "UNKNOWN", "Unknown Error" }, /* 15 */
629 { false, "UNKNOWN", "Unknown Error" }, /* 16 */
630 { false, "UNKNOWN", "Unknown Error" }, /* 17 */
631 { false, "UNKNOWN", "Unknown Error" }, /* 18 */
632 { false, "UNKNOWN", "Unknown Error" }, /* 19 */
633 { false, "UNKNOWN", "Unknown Error" }, /* 20 */
634 { false, "UNKNOWN", "Unknown Error" }, /* 21 */
635 { false, "UNKNOWN", "Unknown Error" }, /* 22 */
636 { false, "UNKNOWN", "Unknown Error" }, /* 23 */
637 { false, "UNKNOWN", "Unknown Error" }, /* 24 */
638 { false, "UNKNOWN", "Unknown Error" }, /* 25 */
639 { false, "UNKNOWN", "Unknown Error" }, /* 26 */
640 { false, "UNKNOWN", "Unknown Error" }, /* 27 */
641 { false, "UNKNOWN", "Unknown Error" }, /* 28 */
642 { false, "UNKNOWN", "Unknown Error" }, /* 29 */
643 { false, "UNKNOWN", "Unknown Error" }, /* 30 */
644 { false, "UNKNOWN", "Unknown Error" }, /* 31 */
647 static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
648 void __iomem *ioaddr, bool correctable,
649 struct stmmac_safety_stats *stats)
653 value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
654 writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
656 dwxgmac3_log_error(ndev, value, correctable, "MTL",
657 dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
660 static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
661 { true, "TCES", "DMA TSO Memory Error" },
662 { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
663 { true, "TUES", "DMA TSO Memory Error" },
664 { false, "UNKNOWN", "Unknown Error" }, /* 3 */
665 { true, "DCES", "DMA DCACHE Memory Error" },
666 { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
667 { true, "DUES", "DMA DCACHE Memory Error" },
668 { false, "UNKNOWN", "Unknown Error" }, /* 7 */
669 { false, "UNKNOWN", "Unknown Error" }, /* 8 */
670 { false, "UNKNOWN", "Unknown Error" }, /* 9 */
671 { false, "UNKNOWN", "Unknown Error" }, /* 10 */
672 { false, "UNKNOWN", "Unknown Error" }, /* 11 */
673 { false, "UNKNOWN", "Unknown Error" }, /* 12 */
674 { false, "UNKNOWN", "Unknown Error" }, /* 13 */
675 { false, "UNKNOWN", "Unknown Error" }, /* 14 */
676 { false, "UNKNOWN", "Unknown Error" }, /* 15 */
677 { false, "UNKNOWN", "Unknown Error" }, /* 16 */
678 { false, "UNKNOWN", "Unknown Error" }, /* 17 */
679 { false, "UNKNOWN", "Unknown Error" }, /* 18 */
680 { false, "UNKNOWN", "Unknown Error" }, /* 19 */
681 { false, "UNKNOWN", "Unknown Error" }, /* 20 */
682 { false, "UNKNOWN", "Unknown Error" }, /* 21 */
683 { false, "UNKNOWN", "Unknown Error" }, /* 22 */
684 { false, "UNKNOWN", "Unknown Error" }, /* 23 */
685 { false, "UNKNOWN", "Unknown Error" }, /* 24 */
686 { false, "UNKNOWN", "Unknown Error" }, /* 25 */
687 { false, "UNKNOWN", "Unknown Error" }, /* 26 */
688 { false, "UNKNOWN", "Unknown Error" }, /* 27 */
689 { false, "UNKNOWN", "Unknown Error" }, /* 28 */
690 { false, "UNKNOWN", "Unknown Error" }, /* 29 */
691 { false, "UNKNOWN", "Unknown Error" }, /* 30 */
692 { false, "UNKNOWN", "Unknown Error" }, /* 31 */
695 static void dwxgmac3_handle_dma_err(struct net_device *ndev,
696 void __iomem *ioaddr, bool correctable,
697 struct stmmac_safety_stats *stats)
701 value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
702 writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
704 dwxgmac3_log_error(ndev, value, correctable, "DMA",
705 dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
708 static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
715 /* 1. Enable Safety Features */
716 writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
718 /* 2. Enable MTL Safety Interrupts */
719 value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
720 value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
721 value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
722 value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
723 value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
724 writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
726 /* 3. Enable DMA Safety Interrupts */
727 value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
728 value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
729 value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
730 writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
732 /* Only ECC Protection for External Memory feature is selected */
736 /* 4. Enable Parity and Timeout for FSM */
737 value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
738 value |= XGMAC_PRTYEN; /* FSM Parity Feature */
739 value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
740 writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
745 static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
746 void __iomem *ioaddr,
748 struct stmmac_safety_stats *stats)
757 mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
758 dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
760 err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
763 dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
767 err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
768 (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
769 corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
771 dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
775 err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
776 corr = dma & XGMAC_DECIS;
778 dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
785 static const struct dwxgmac3_error {
786 const struct dwxgmac3_error_desc *desc;
787 } dwxgmac3_all_errors[] = {
788 { dwxgmac3_mac_errors },
789 { dwxgmac3_mtl_errors },
790 { dwxgmac3_dma_errors },
793 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
794 int index, unsigned long *count,
797 int module = index / 32, offset = index % 32;
798 unsigned long *ptr = (unsigned long *)stats;
800 if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
802 if (!dwxgmac3_all_errors[module].desc[offset].valid)
805 *count = *(ptr + index);
807 *desc = dwxgmac3_all_errors[module].desc[offset].desc;
811 static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
813 u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
816 writel(val, ioaddr + XGMAC_MTL_OPMODE);
821 static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
825 val = readl(ioaddr + XGMAC_MTL_OPMODE);
827 writel(val, ioaddr + XGMAC_MTL_OPMODE);
830 static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
831 struct stmmac_tc_entry *entry,
836 for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
837 int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
841 ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
842 val, !(val & XGMAC_STARTBUSY), 1, 10000);
847 val = *((u32 *)&entry->val + i);
848 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
851 val = real_pos & XGMAC_ADDR;
852 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
856 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
859 val |= XGMAC_STARTBUSY;
860 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
863 ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
864 val, !(val & XGMAC_STARTBUSY), 1, 10000);
872 static struct stmmac_tc_entry *
873 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
874 unsigned int count, u32 curr_prio)
876 struct stmmac_tc_entry *entry;
881 for (i = count - 1; i >= 0; i--) {
884 /* Do not update unused entries */
887 /* Do not update already updated entries (i.e. fragments) */
890 /* Let last entry be updated last */
893 /* Do not return fragments */
896 /* Check if we already checked this prio */
897 if (entry->prio < curr_prio)
899 /* Check if this is the minimum prio */
900 if (entry->prio < min_prio) {
901 min_prio = entry->prio;
908 return &entries[min_prio_idx];
912 static int dwxgmac3_rxp_config(void __iomem *ioaddr,
913 struct stmmac_tc_entry *entries,
916 struct stmmac_tc_entry *entry, *frag;
921 /* Force disable RX */
922 old_val = readl(ioaddr + XGMAC_RX_CONFIG);
923 val = old_val & ~XGMAC_CONFIG_RE;
924 writel(val, ioaddr + XGMAC_RX_CONFIG);
926 /* Disable RX Parser */
927 ret = dwxgmac3_rxp_disable(ioaddr);
931 /* Set all entries as NOT in HW */
932 for (i = 0; i < count; i++) {
934 entry->in_hw = false;
937 /* Update entries by reverse order */
939 entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
943 curr_prio = entry->prio;
944 frag = entry->frag_ptr;
946 /* Set special fragment requirements */
951 entry->val.ok_index = nve + 2;
954 ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
958 entry->table_pos = nve++;
961 if (frag && !frag->in_hw) {
962 ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
965 frag->table_pos = nve++;
973 /* Update all pass entry */
974 for (i = 0; i < count; i++) {
979 ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
983 entry->table_pos = nve++;
986 /* Assume n. of parsable entries == n. of valid entries */
987 val = (nve << 16) & XGMAC_NPE;
988 val |= nve & XGMAC_NVE;
989 writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
991 /* Enable RX Parser */
992 dwxgmac3_rxp_enable(ioaddr);
996 writel(old_val, ioaddr + XGMAC_RX_CONFIG);
1000 static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
1002 void __iomem *ioaddr = hw->pcsr;
1005 if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
1006 value, value & XGMAC_TXTSC, 100, 10000))
1009 *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
1010 *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
1014 const struct stmmac_ops dwxgmac210_ops = {
1015 .core_init = dwxgmac2_core_init,
1016 .set_mac = dwxgmac2_set_mac,
1017 .rx_ipc = dwxgmac2_rx_ipc,
1018 .rx_queue_enable = dwxgmac2_rx_queue_enable,
1019 .rx_queue_prio = dwxgmac2_rx_queue_prio,
1020 .tx_queue_prio = dwxgmac2_tx_queue_prio,
1021 .rx_queue_routing = NULL,
1022 .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
1023 .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
1024 .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
1025 .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
1026 .config_cbs = dwxgmac2_config_cbs,
1028 .host_irq_status = dwxgmac2_host_irq_status,
1029 .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
1030 .flow_ctrl = dwxgmac2_flow_ctrl,
1031 .pmt = dwxgmac2_pmt,
1032 .set_umac_addr = dwxgmac2_set_umac_addr,
1033 .get_umac_addr = dwxgmac2_get_umac_addr,
1034 .set_eee_mode = NULL,
1035 .reset_eee_mode = NULL,
1036 .set_eee_timer = NULL,
1037 .set_eee_pls = NULL,
1038 .pcs_ctrl_ane = NULL,
1040 .pcs_get_adv_lp = NULL,
1042 .set_filter = dwxgmac2_set_filter,
1043 .safety_feat_config = dwxgmac3_safety_feat_config,
1044 .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
1045 .safety_feat_dump = dwxgmac3_safety_feat_dump,
1046 .set_mac_loopback = dwxgmac2_set_mac_loopback,
1047 .rss_configure = dwxgmac2_rss_configure,
1048 .update_vlan_hash = dwxgmac2_update_vlan_hash,
1049 .rxp_config = dwxgmac3_rxp_config,
1050 .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
1053 int dwxgmac2_setup(struct stmmac_priv *priv)
1055 struct mac_device_info *mac = priv->hw;
1057 dev_info(priv->device, "\tXGMAC2\n");
1059 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1060 mac->pcsr = priv->ioaddr;
1061 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1062 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1063 mac->mcast_bits_log2 = 0;
1065 if (mac->multicast_filter_bins)
1066 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1068 mac->link.duplex = 0;
1069 mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
1070 mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
1071 mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
1072 mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
1073 mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
1074 mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
1075 mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
1076 mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
1078 mac->mii.addr = XGMAC_MDIO_ADDR;
1079 mac->mii.data = XGMAC_MDIO_DATA;
1080 mac->mii.addr_shift = 16;
1081 mac->mii.addr_mask = GENMASK(20, 16);
1082 mac->mii.reg_shift = 0;
1083 mac->mii.reg_mask = GENMASK(15, 0);
1084 mac->mii.clk_csr_shift = 19;
1085 mac->mii.clk_csr_mask = GENMASK(21, 19);