// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */
7 #include <linux/iopoll.h>
11 static int dwxgmac2_dma_reset(void __iomem *ioaddr)
13 u32 value = readl(ioaddr + XGMAC_DMA_MODE);
16 writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
18 return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19 !(value & XGMAC_SWR), 0, 100000);
22 static void dwxgmac2_dma_init(void __iomem *ioaddr,
23 struct stmmac_dma_cfg *dma_cfg, int atds)
25 u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
33 writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
36 static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
37 struct stmmac_dma_cfg *dma_cfg, u32 chan)
39 u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
44 writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
45 writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
48 static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
49 struct stmmac_dma_cfg *dma_cfg,
50 dma_addr_t phy, u32 chan)
52 u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
55 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
56 value &= ~XGMAC_RxPBL;
57 value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
58 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
60 writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
61 writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
64 static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
65 struct stmmac_dma_cfg *dma_cfg,
66 dma_addr_t phy, u32 chan)
68 u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
71 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
72 value &= ~XGMAC_TxPBL;
73 value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
75 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
77 writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
78 writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
81 static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
83 u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
87 value |= XGMAC_EN_LPI;
89 value |= XGMAC_LPI_XIT_PKT;
91 value &= ~XGMAC_WR_OSR_LMT;
92 value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
95 value &= ~XGMAC_RD_OSR_LMT;
96 value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
100 value |= XGMAC_UNDEF;
102 value &= ~XGMAC_BLEN;
103 for (i = 0; i < AXI_BLEN; i++) {
104 switch (axi->axi_blen[i]) {
106 value |= XGMAC_BLEN256;
109 value |= XGMAC_BLEN128;
112 value |= XGMAC_BLEN64;
115 value |= XGMAC_BLEN32;
118 value |= XGMAC_BLEN16;
121 value |= XGMAC_BLEN8;
124 value |= XGMAC_BLEN4;
129 writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
130 writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
131 writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
134 static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
138 for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
139 reg_space[i] = readl(ioaddr + i * 4);
142 static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
143 u32 channel, int fifosz, u8 qmode)
145 u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
146 unsigned int rqs = fifosz / 256 - 1;
148 if (mode == SF_DMA_MODE) {
155 value |= 0x0 << XGMAC_RTC_SHIFT;
157 value |= 0x2 << XGMAC_RTC_SHIFT;
159 value |= 0x3 << XGMAC_RTC_SHIFT;
163 value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
165 if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
166 u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
167 unsigned int rfd, rfa;
171 /* Set Threshold for Activating Flow Control to min 2 frames,
172 * i.e. 1500 * 2 = 3000 bytes.
174 * Set Threshold for Deactivating Flow Control to min 1 frame,
179 /* This violates the above formula because of FIFO size
180 * limit therefore overflow may occur in spite of this.
182 rfd = 0x03; /* Full-2.5K */
183 rfa = 0x01; /* Full-1.5K */
187 rfd = 0x07; /* Full-4.5K */
188 rfa = 0x04; /* Full-3K */
193 flow |= rfd << XGMAC_RFD_SHIFT;
196 flow |= rfa << XGMAC_RFA_SHIFT;
198 writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
201 writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
203 /* Enable MTL RX overflow */
204 value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
205 writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
208 static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
209 u32 channel, int fifosz, u8 qmode)
211 u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
212 unsigned int tqs = fifosz / 256 - 1;
214 if (mode == SF_DMA_MODE) {
221 value |= 0x0 << XGMAC_TTC_SHIFT;
223 value |= 0x2 << XGMAC_TTC_SHIFT;
224 else if (mode <= 128)
225 value |= 0x3 << XGMAC_TTC_SHIFT;
226 else if (mode <= 192)
227 value |= 0x4 << XGMAC_TTC_SHIFT;
228 else if (mode <= 256)
229 value |= 0x5 << XGMAC_TTC_SHIFT;
230 else if (mode <= 384)
231 value |= 0x6 << XGMAC_TTC_SHIFT;
233 value |= 0x7 << XGMAC_TTC_SHIFT;
236 /* Use static TC to Queue mapping */
237 value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
239 value &= ~XGMAC_TXQEN;
240 if (qmode != MTL_QUEUE_AVB)
241 value |= 0x2 << XGMAC_TXQEN_SHIFT;
243 value |= 0x1 << XGMAC_TXQEN_SHIFT;
246 value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
248 writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
251 static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
253 writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
256 static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
258 writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
261 static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
265 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
267 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
269 value = readl(ioaddr + XGMAC_TX_CONFIG);
270 value |= XGMAC_CONFIG_TE;
271 writel(value, ioaddr + XGMAC_TX_CONFIG);
274 static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
278 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
279 value &= ~XGMAC_TXST;
280 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
282 value = readl(ioaddr + XGMAC_TX_CONFIG);
283 value &= ~XGMAC_CONFIG_TE;
284 writel(value, ioaddr + XGMAC_TX_CONFIG);
287 static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
291 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
293 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
295 value = readl(ioaddr + XGMAC_RX_CONFIG);
296 value |= XGMAC_CONFIG_RE;
297 writel(value, ioaddr + XGMAC_RX_CONFIG);
300 static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
304 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
305 value &= ~XGMAC_RXST;
306 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
309 static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
310 struct stmmac_extra_stats *x, u32 chan)
312 u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
313 u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
316 /* ABNORMAL interrupts */
317 if (unlikely(intr_status & XGMAC_AIS)) {
318 if (unlikely(intr_status & XGMAC_RBU)) {
319 x->rx_buf_unav_irq++;
322 if (unlikely(intr_status & XGMAC_TPS)) {
323 x->tx_process_stopped_irq++;
324 ret |= tx_hard_error;
326 if (unlikely(intr_status & XGMAC_FBE)) {
327 x->fatal_bus_error_irq++;
328 ret |= tx_hard_error;
332 /* TX/RX NORMAL interrupts */
333 if (likely(intr_status & XGMAC_NIS)) {
336 if (likely(intr_status & XGMAC_RI)) {
337 x->rx_normal_irq_n++;
340 if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
341 x->tx_normal_irq_n++;
346 /* Clear interrupts */
347 writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
352 static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
353 struct dma_features *dma_cap)
357 /* MAC HW feature 0 */
358 hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
359 dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
360 dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
361 dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
362 dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
363 dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
364 dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
365 dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
366 dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
367 dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
368 dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
369 dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
370 dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
371 dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
373 /* MAC HW feature 1 */
374 hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
375 dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
376 dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
377 dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
378 dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
379 dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
381 dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
382 switch (dma_cap->addr64) {
384 dma_cap->addr64 = 32;
387 dma_cap->addr64 = 40;
390 dma_cap->addr64 = 48;
393 dma_cap->addr64 = 32;
397 dma_cap->tx_fifo_size =
398 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
399 dma_cap->rx_fifo_size =
400 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);
402 /* MAC HW feature 2 */
403 hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
404 dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
405 dma_cap->number_tx_channel =
406 ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
407 dma_cap->number_rx_channel =
408 ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
409 dma_cap->number_tx_queues =
410 ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
411 dma_cap->number_rx_queues =
412 ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
414 /* MAC HW feature 3 */
415 hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
416 dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
417 dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
418 dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
419 dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
420 dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
423 static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
427 for (i = 0; i < nchan; i++)
428 writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
431 static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
433 writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
436 static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
438 writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
441 static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
443 writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
446 static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
448 writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
451 static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
453 u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
460 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
463 static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
465 u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
466 u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
468 value &= ~XGMAC_TXQEN;
469 if (qmode != MTL_QUEUE_AVB) {
470 value |= 0x2 << XGMAC_TXQEN_SHIFT;
471 writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
473 value |= 0x1 << XGMAC_TXQEN_SHIFT;
474 writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
477 writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
480 static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
484 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
485 value |= bfsize << 1;
486 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
489 static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
491 u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
493 value &= ~XGMAC_CONFIG_HDSMS;
494 value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
495 writel(value, ioaddr + XGMAC_RX_CONFIG);
497 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
502 writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
505 const struct stmmac_dma_ops dwxgmac210_dma_ops = {
506 .reset = dwxgmac2_dma_reset,
507 .init = dwxgmac2_dma_init,
508 .init_chan = dwxgmac2_dma_init_chan,
509 .init_rx_chan = dwxgmac2_dma_init_rx_chan,
510 .init_tx_chan = dwxgmac2_dma_init_tx_chan,
511 .axi = dwxgmac2_dma_axi,
512 .dump_regs = dwxgmac2_dma_dump_regs,
513 .dma_rx_mode = dwxgmac2_dma_rx_mode,
514 .dma_tx_mode = dwxgmac2_dma_tx_mode,
515 .enable_dma_irq = dwxgmac2_enable_dma_irq,
516 .disable_dma_irq = dwxgmac2_disable_dma_irq,
517 .start_tx = dwxgmac2_dma_start_tx,
518 .stop_tx = dwxgmac2_dma_stop_tx,
519 .start_rx = dwxgmac2_dma_start_rx,
520 .stop_rx = dwxgmac2_dma_stop_rx,
521 .dma_interrupt = dwxgmac2_dma_interrupt,
522 .get_hw_feature = dwxgmac2_get_hw_feature,
523 .rx_watchdog = dwxgmac2_rx_watchdog,
524 .set_rx_ring_len = dwxgmac2_set_rx_ring_len,
525 .set_tx_ring_len = dwxgmac2_set_tx_ring_len,
526 .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
527 .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
528 .enable_tso = dwxgmac2_enable_tso,
529 .qmode = dwxgmac2_qmode,
530 .set_bfsize = dwxgmac2_set_bfsize,
531 .enable_sph = dwxgmac2_enable_sph,