// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "dwxgmac2.h"

static void dwxgmac2_core_init(struct mac_device_info *hw,
                               struct net_device *dev)
{
        void __iomem *ioaddr = hw->pcsr;
        int mtu = dev->mtu;
        u32 tx, rx;

        tx = readl(ioaddr + XGMAC_TX_CONFIG);
        rx = readl(ioaddr + XGMAC_RX_CONFIG);

        tx |= XGMAC_CORE_INIT_TX;
        rx |= XGMAC_CORE_INIT_RX;

        if (mtu >= 9000) {
                rx |= XGMAC_CONFIG_GPSLCE;
                rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT;
                rx |= XGMAC_CONFIG_WD;
        } else if (mtu > 2000) {
                rx |= XGMAC_CONFIG_JE;
        } else if (mtu > 1500) {
                rx |= XGMAC_CONFIG_S2KP;
        }

        if (hw->ps) {
                tx |= XGMAC_CONFIG_TE;
                tx &= ~hw->link.speed_mask;

                switch (hw->ps) {
                case SPEED_10000:
                        tx |= hw->link.xgmii.speed10000;
                        break;
                case SPEED_2500:
                        tx |= hw->link.speed2500;
                        break;
                case SPEED_1000:
                default:
                        tx |= hw->link.speed1000;
                        break;
                }
        }

        writel(tx, ioaddr + XGMAC_TX_CONFIG);
        writel(rx, ioaddr + XGMAC_RX_CONFIG);
        writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}

static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
        u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
        u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);

        if (enable) {
                tx |= XGMAC_CONFIG_TE;
                rx |= XGMAC_CONFIG_RE;
        } else {
                tx &= ~XGMAC_CONFIG_TE;
                rx &= ~XGMAC_CONFIG_RE;
        }

        writel(tx, ioaddr + XGMAC_TX_CONFIG);
        writel(rx, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + XGMAC_RX_CONFIG);
        if (hw->rx_csum)
                value |= XGMAC_CONFIG_IPC;
        else
                value &= ~XGMAC_CONFIG_IPC;
        writel(value, ioaddr + XGMAC_RX_CONFIG);

        return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
}

static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
                                     u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
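        /* RXQEN field: 0x1 enables the queue for AV traffic, 0x2 for DCB/generic */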
        if (mode == MTL_QUEUE_AVB)
                value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
        else if (mode == MTL_QUEUE_DCB)
                value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
        writel(value, ioaddr + XGMAC_RXQ_CTRL0);
}

static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
                                   u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value, reg;

        reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
        if (queue >= 4)
                queue -= 4;

        value = readl(ioaddr + reg);
        value &= ~XGMAC_PSRQ(queue);
        value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

        writel(value, ioaddr + reg);
}

static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
                                   u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value, reg;

        reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
        if (queue >= 4)
                queue -= 4;

        value = readl(ioaddr + reg);
        value &= ~XGMAC_PSTC(queue);
        value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

        writel(value, ioaddr + reg);
}

static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
                                            u32 rx_alg)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + XGMAC_MTL_OPMODE);
        value &= ~XGMAC_RAA;

        switch (rx_alg) {
        case MTL_RX_ALGORITHM_SP:
                break;
        case MTL_RX_ALGORITHM_WSP:
                value |= XGMAC_RAA;
                break;
        default:
                break;
        }

        writel(value, ioaddr + XGMAC_MTL_OPMODE);
}

static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
                                            u32 tx_alg)
{
        void __iomem *ioaddr = hw->pcsr;
        bool ets = true;
        u32 value;
        int i;

        value = readl(ioaddr + XGMAC_MTL_OPMODE);
        value &= ~XGMAC_ETSALG;

        switch (tx_alg) {
        case MTL_TX_ALGORITHM_WRR:
                value |= XGMAC_WRR;
                break;
        case MTL_TX_ALGORITHM_WFQ:
                value |= XGMAC_WFQ;
                break;
        case MTL_TX_ALGORITHM_DWRR:
                value |= XGMAC_DWRR;
                break;
        default:
                ets = false;
                break;
        }

        writel(value, ioaddr + XGMAC_MTL_OPMODE);

        /* Set ETS if desired */
        for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
                value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
                value &= ~XGMAC_TSA;
                if (ets)
                        value |= XGMAC_ETS;
                writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
        }
}

static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
                                             u32 weight, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;

        writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}

static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
                                    u32 chan)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value, reg;

        reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
        if (queue >= 4)
                queue -= 4;

        value = readl(ioaddr + reg);
        value &= ~XGMAC_QxMDMACH(queue);
        value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

        writel(value, ioaddr + reg);
}

static void dwxgmac2_config_cbs(struct mac_device_info *hw,
                                u32 send_slope, u32 idle_slope,
                                u32 high_credit, u32 low_credit, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
        writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
        writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
        writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

        value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
        value |= XGMAC_CC | XGMAC_CBS;
        writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}

static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
                                    struct stmmac_extra_stats *x)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 stat, en;

        en = readl(ioaddr + XGMAC_INT_EN);
        stat = readl(ioaddr + XGMAC_INT_STATUS);

        stat &= en;

        if (stat & XGMAC_PMTIS) {
                x->irq_receive_pmt_irq_n++;
                readl(ioaddr + XGMAC_PMT);
        }

        return 0;
}

static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
{
        void __iomem *ioaddr = hw->pcsr;
        int ret = 0;
        u32 status;

        status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
        if (status & BIT(chan)) {
                u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

                if (chan_status & XGMAC_RXOVFIS)
                        ret |= CORE_IRQ_MTL_RX_OVERFLOW;

                writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
        }

        return ret;
}

static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
                               unsigned int fc, unsigned int pause_time,
                               u32 tx_cnt)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 i;

        if (fc & FLOW_RX)
                writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
        if (fc & FLOW_TX) {
                for (i = 0; i < tx_cnt; i++) {
                        u32 value = XGMAC_TFE;

                        if (duplex)
                                value |= pause_time << XGMAC_PT_SHIFT;

                        writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
                }
        }
}

static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 val = 0x0;

        if (mode & WAKE_MAGIC)
                val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
        if (mode & WAKE_UCAST)
                val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
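        /* Keep the receiver enabled while powered down so that
         * wake-up packets can still be received.
         */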
        if (val) {
                u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
                cfg |= XGMAC_CONFIG_RE;
                writel(cfg, ioaddr + XGMAC_RX_CONFIG);
        }

        writel(val, ioaddr + XGMAC_PMT);
}

static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
                                   unsigned char *addr, unsigned int reg_n)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

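        /* The MAC address is split across the HIGH/LOW register pair;
         * XGMAC_AE marks the entry as enabled.
         */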
        value = (addr[5] << 8) | addr[4];
        writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));

        value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
        writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}

static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
                                   unsigned char *addr, unsigned int reg_n)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 hi_addr, lo_addr;

        /* Read the MAC address from the hardware */
        hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
        lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));

        /* Extract the MAC address from the high and low words */
        addr[0] = lo_addr & 0xff;
        addr[1] = (lo_addr >> 8) & 0xff;
        addr[2] = (lo_addr >> 16) & 0xff;
        addr[3] = (lo_addr >> 24) & 0xff;
        addr[4] = hi_addr & 0xff;
        addr[5] = (hi_addr >> 8) & 0xff;
}

static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
                                int mcbitslog2)
{
        int numhashregs, regs;

        switch (mcbitslog2) {
        case 6:
                numhashregs = 2;
                break;
        case 7:
                numhashregs = 4;
                break;
        case 8:
                numhashregs = 8;
                break;
        default:
                return;
        }

        for (regs = 0; regs < numhashregs; regs++)
                writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
}

static void dwxgmac2_set_filter(struct mac_device_info *hw,
                                struct net_device *dev)
{
        void __iomem *ioaddr = (void __iomem *)dev->base_addr;
        u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
        int mcbitslog2 = hw->mcast_bits_log2;
        u32 mc_filter[8];
        int i;

        value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
        value |= XGMAC_FILTER_HPF;

        memset(mc_filter, 0, sizeof(mc_filter));

        if (dev->flags & IFF_PROMISC) {
                value |= XGMAC_FILTER_PR;
                value |= XGMAC_FILTER_PCF;
        } else if ((dev->flags & IFF_ALLMULTI) ||
                   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
                value |= XGMAC_FILTER_PM;

                for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
                        writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
        } else if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;

                value |= XGMAC_FILTER_HMC;

                netdev_for_each_mc_addr(ha, dev) {
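                        /* The hash bit index is the upper mcbitslog2 bits of
                         * the bit-reversed CRC32 of the MAC address.
                         */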
                        int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
                                        (32 - mcbitslog2));
                        mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
                }
        }

        dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

        /* Handle multiple unicast addresses */
        if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
                value |= XGMAC_FILTER_PR;
        } else {
                struct netdev_hw_addr *ha;
                int reg = 1;

                netdev_for_each_uc_addr(ha, dev) {
                        dwxgmac2_set_umac_addr(hw, ha->addr, reg);
                        reg++;
                }

                for ( ; reg < XGMAC_ADDR_MAX; reg++) {
                        writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
                        writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
                }
        }

        writel(value, ioaddr + XGMAC_PACKET_FILTER);
}

static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
        u32 value = readl(ioaddr + XGMAC_RX_CONFIG);

        if (enable)
                value |= XGMAC_CONFIG_LM;
        else
                value &= ~XGMAC_CONFIG_LM;

        writel(value, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
                                  u32 val)
{
        u32 ctrl = 0;

        writel(val, ioaddr + XGMAC_RSS_DATA);
        ctrl |= idx << XGMAC_RSSIA_SHIFT;
        ctrl |= is_key ? XGMAC_ADDRT : 0x0;
        ctrl |= XGMAC_OB;
        writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

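        /* Wait for the write to complete (hardware clears XGMAC_OB) */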
        return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
                                  !(ctrl & XGMAC_OB), 100, 10000);
}

static int dwxgmac2_rss_configure(struct mac_device_info *hw,
                                  struct stmmac_rss *cfg, u32 num_rxq)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 *key = (u32 *)cfg->key;
        int i, ret;
        u32 value;

        value = readl(ioaddr + XGMAC_RSS_CTRL);
        if (!cfg->enable) {
                value &= ~XGMAC_RSSE;
                writel(value, ioaddr + XGMAC_RSS_CTRL);
                return 0;
        }

        for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
                ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
                if (ret)
                        return ret;
        }

        for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
                ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
                if (ret)
                        return ret;
        }

        for (i = 0; i < num_rxq; i++)
                dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

        value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
        writel(value, ioaddr + XGMAC_RSS_CTRL);
        return 0;
}

static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
                                      bool is_double)
{
        void __iomem *ioaddr = hw->pcsr;

        writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

        if (hash) {
                u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

                value |= XGMAC_FILTER_VTFE;

                writel(value, ioaddr + XGMAC_PACKET_FILTER);

                value = readl(ioaddr + XGMAC_VLAN_TAG);

                value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
                if (is_double) {
                        value |= XGMAC_VLAN_EDVLP;
                        value |= XGMAC_VLAN_ESVL;
                        value |= XGMAC_VLAN_DOVLTC;
                }

                writel(value, ioaddr + XGMAC_VLAN_TAG);
        } else {
                u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

                value &= ~XGMAC_FILTER_VTFE;

                writel(value, ioaddr + XGMAC_PACKET_FILTER);

                value = readl(ioaddr + XGMAC_VLAN_TAG);

                value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
                value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
                value &= ~XGMAC_VLAN_DOVLTC;
                value &= ~XGMAC_VLAN_VID;

                writel(value, ioaddr + XGMAC_VLAN_TAG);
        }
}

struct dwxgmac3_error_desc {
        bool valid;
        const char *desc;
        const char *detailed_desc;
};

#define STAT_OFF(field)         offsetof(struct stmmac_safety_stats, field)

static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
                               const char *module_name,
                               const struct dwxgmac3_error_desc *desc,
                               unsigned long field_offset,
                               struct stmmac_safety_stats *stats)
{
        unsigned long loc, mask;
        u8 *bptr = (u8 *)stats;
        unsigned long *ptr;

        ptr = (unsigned long *)(bptr + field_offset);

        mask = value;
        for_each_set_bit(loc, &mask, 32) {
                netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
                                "correctable" : "uncorrectable", module_name,
                                desc[loc].desc, desc[loc].detailed_desc);

                /* Update counters */
                ptr[loc]++;
        }
}

static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
        { true, "ATPES", "Application Transmit Interface Parity Check Error" },
        { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
        { true, "TPES", "TSO Data Path Parity Check Error" },
        { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
        { true, "MTPES", "MTL Data Path Parity Check Error" },
        { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
        { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
        { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
        { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
        { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
        { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
        { true, "CWPES", "CSR Write Data Path Parity Check Error" },
        { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
        { true, "TTES", "TX FSM Timeout Error" },
        { true, "RTES", "RX FSM Timeout Error" },
        { true, "CTES", "CSR FSM Timeout Error" },
        { true, "ATES", "APP FSM Timeout Error" },
        { true, "PTES", "PTP FSM Timeout Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 18 */
        { false, "UNKNOWN", "Unknown Error" }, /* 19 */
        { false, "UNKNOWN", "Unknown Error" }, /* 20 */
        { true, "MSTTES", "Master Read/Write Timeout Error" },
        { true, "SLVTES", "Slave Read/Write Timeout Error" },
        { true, "ATITES", "Application Timeout on ATI Interface Error" },
        { true, "ARITES", "Application Timeout on ARI Interface Error" },
        { true, "FSMPES", "FSM State Parity Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 26 */
        { false, "UNKNOWN", "Unknown Error" }, /* 27 */
        { false, "UNKNOWN", "Unknown Error" }, /* 28 */
        { false, "UNKNOWN", "Unknown Error" }, /* 29 */
        { false, "UNKNOWN", "Unknown Error" }, /* 30 */
        { true, "CPI", "Control Register Parity Check Error" },
};

static void dwxgmac3_handle_mac_err(struct net_device *ndev,
                                    void __iomem *ioaddr, bool correctable,
                                    struct stmmac_safety_stats *stats)
{
        u32 value;

        value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
        writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

        dwxgmac3_log_error(ndev, value, correctable, "MAC",
                           dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
        { true, "TXCES", "MTL TX Memory Error" },
        { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
        { true, "TXUES", "MTL TX Memory Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 3 */
        { true, "RXCES", "MTL RX Memory Error" },
        { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
        { true, "RXUES", "MTL RX Memory Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 7 */
        { true, "ECES", "MTL EST Memory Error" },
        { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
        { true, "EUES", "MTL EST Memory Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 11 */
        { true, "RPCES", "MTL RX Parser Memory Error" },
        { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
        { true, "RPUES", "MTL RX Parser Memory Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 15 */
        { false, "UNKNOWN", "Unknown Error" }, /* 16 */
        { false, "UNKNOWN", "Unknown Error" }, /* 17 */
        { false, "UNKNOWN", "Unknown Error" }, /* 18 */
        { false, "UNKNOWN", "Unknown Error" }, /* 19 */
        { false, "UNKNOWN", "Unknown Error" }, /* 20 */
        { false, "UNKNOWN", "Unknown Error" }, /* 21 */
        { false, "UNKNOWN", "Unknown Error" }, /* 22 */
        { false, "UNKNOWN", "Unknown Error" }, /* 23 */
        { false, "UNKNOWN", "Unknown Error" }, /* 24 */
        { false, "UNKNOWN", "Unknown Error" }, /* 25 */
        { false, "UNKNOWN", "Unknown Error" }, /* 26 */
        { false, "UNKNOWN", "Unknown Error" }, /* 27 */
        { false, "UNKNOWN", "Unknown Error" }, /* 28 */
        { false, "UNKNOWN", "Unknown Error" }, /* 29 */
        { false, "UNKNOWN", "Unknown Error" }, /* 30 */
        { false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
                                    void __iomem *ioaddr, bool correctable,
                                    struct stmmac_safety_stats *stats)
{
        u32 value;

        value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
        writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

        dwxgmac3_log_error(ndev, value, correctable, "MTL",
                           dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
        { true, "TCES", "DMA TSO Memory Error" },
        { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
        { true, "TUES", "DMA TSO Memory Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 3 */
        { true, "DCES", "DMA DCACHE Memory Error" },
        { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
        { true, "DUES", "DMA DCACHE Memory Error" },
        { false, "UNKNOWN", "Unknown Error" }, /* 7 */
        { false, "UNKNOWN", "Unknown Error" }, /* 8 */
        { false, "UNKNOWN", "Unknown Error" }, /* 9 */
        { false, "UNKNOWN", "Unknown Error" }, /* 10 */
        { false, "UNKNOWN", "Unknown Error" }, /* 11 */
        { false, "UNKNOWN", "Unknown Error" }, /* 12 */
        { false, "UNKNOWN", "Unknown Error" }, /* 13 */
        { false, "UNKNOWN", "Unknown Error" }, /* 14 */
        { false, "UNKNOWN", "Unknown Error" }, /* 15 */
        { false, "UNKNOWN", "Unknown Error" }, /* 16 */
        { false, "UNKNOWN", "Unknown Error" }, /* 17 */
        { false, "UNKNOWN", "Unknown Error" }, /* 18 */
        { false, "UNKNOWN", "Unknown Error" }, /* 19 */
        { false, "UNKNOWN", "Unknown Error" }, /* 20 */
        { false, "UNKNOWN", "Unknown Error" }, /* 21 */
        { false, "UNKNOWN", "Unknown Error" }, /* 22 */
        { false, "UNKNOWN", "Unknown Error" }, /* 23 */
        { false, "UNKNOWN", "Unknown Error" }, /* 24 */
        { false, "UNKNOWN", "Unknown Error" }, /* 25 */
        { false, "UNKNOWN", "Unknown Error" }, /* 26 */
        { false, "UNKNOWN", "Unknown Error" }, /* 27 */
        { false, "UNKNOWN", "Unknown Error" }, /* 28 */
        { false, "UNKNOWN", "Unknown Error" }, /* 29 */
        { false, "UNKNOWN", "Unknown Error" }, /* 30 */
        { false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_dma_err(struct net_device *ndev,
                                    void __iomem *ioaddr, bool correctable,
                                    struct stmmac_safety_stats *stats)
{
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
        writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

        dwxgmac3_log_error(ndev, value, correctable, "DMA",
                           dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}

static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
        u32 value;

        if (!asp)
                return -EINVAL;

        /* 1. Enable Safety Features */
        writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

        /* 2. Enable MTL Safety Interrupts */
        value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
        value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
        value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
        value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
        value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
        writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

        /* 3. Enable DMA Safety Interrupts */
        value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
        value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
        value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
        writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

        /* Only ECC Protection for External Memory feature is selected */
        if (asp <= 0x1)
                return 0;

        /* 4. Enable Parity and Timeout for FSM */
        value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
        value |= XGMAC_PRTYEN; /* FSM Parity Feature */
        value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
        writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

        return 0;
}

static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
                                           void __iomem *ioaddr,
                                           unsigned int asp,
                                           struct stmmac_safety_stats *stats)
{
        bool err, corr;
        u32 mtl, dma;
        int ret = 0;

        if (!asp)
                return -EINVAL;

        mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
        dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

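        /* MAC-level (MCSIS) errors are always reported as uncorrectable */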
        err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
        corr = false;
        if (err) {
                dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
                ret |= !corr;
        }

        err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
              (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
        corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
        if (err) {
                dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
                ret |= !corr;
        }

        err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
        corr = dma & XGMAC_DECIS;
        if (err) {
                dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
                ret |= !corr;
        }

        return ret;
}

static const struct dwxgmac3_error {
        const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
        { dwxgmac3_mac_errors },
        { dwxgmac3_mtl_errors },
        { dwxgmac3_dma_errors },
};

static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
                                     int index, unsigned long *count,
                                     const char **desc)
{
        int module = index / 32, offset = index % 32;
        unsigned long *ptr = (unsigned long *)stats;

        if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
                return -EINVAL;
        if (!dwxgmac3_all_errors[module].desc[offset].valid)
                return -EINVAL;
        if (count)
                *count = *(ptr + index);
        if (desc)
                *desc = dwxgmac3_all_errors[module].desc[offset].desc;
        return 0;
}

static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
        u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);

        val &= ~XGMAC_FRPE;
        writel(val, ioaddr + XGMAC_MTL_OPMODE);

        return 0;
}

static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
        u32 val;

        val = readl(ioaddr + XGMAC_MTL_OPMODE);
        val |= XGMAC_FRPE;
        writel(val, ioaddr + XGMAC_MTL_OPMODE);
}

static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
                                            struct stmmac_tc_entry *entry,
                                            int pos)
{
        int ret, i;

        for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
                int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
                u32 val;

                /* Wait for ready */
                ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
                                         val, !(val & XGMAC_STARTBUSY), 1, 10000);
                if (ret)
                        return ret;

                /* Write data */
                val = *((u32 *)&entry->val + i);
                writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

                /* Write pos */
                val = real_pos & XGMAC_ADDR;
                writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

                /* Write OP */
                val |= XGMAC_WRRDN;
                writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

                /* Start Write */
                val |= XGMAC_STARTBUSY;
                writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

                /* Wait for done */
                ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
                                         val, !(val & XGMAC_STARTBUSY), 1, 10000);
                if (ret)
                        return ret;
        }

        return 0;
}

static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
                            unsigned int count, u32 curr_prio)
{
        struct stmmac_tc_entry *entry;
        u32 min_prio = ~0x0;
        int i, min_prio_idx;
        bool found = false;

        for (i = count - 1; i >= 0; i--) {
                entry = &entries[i];

                /* Do not update unused entries */
                if (!entry->in_use)
                        continue;
                /* Do not update already updated entries (i.e. fragments) */
                if (entry->in_hw)
                        continue;
                /* Let last entry be updated last */
                if (entry->is_last)
                        continue;
                /* Do not return fragments */
                if (entry->is_frag)
                        continue;
                /* Check if we already checked this prio */
                if (entry->prio < curr_prio)
                        continue;
                /* Check if this is the minimum prio */
                if (entry->prio < min_prio) {
                        min_prio = entry->prio;
                        min_prio_idx = i;
                        found = true;
                }
        }

        if (found)
                return &entries[min_prio_idx];
        return NULL;
}

static int dwxgmac3_rxp_config(void __iomem *ioaddr,
                               struct stmmac_tc_entry *entries,
                               unsigned int count)
{
        struct stmmac_tc_entry *entry, *frag;
        int i, ret, nve = 0;
        u32 curr_prio = 0;
        u32 old_val, val;

        /* Force disable RX */
        old_val = readl(ioaddr + XGMAC_RX_CONFIG);
        val = old_val & ~XGMAC_CONFIG_RE;
        writel(val, ioaddr + XGMAC_RX_CONFIG);

        /* Disable RX Parser */
        ret = dwxgmac3_rxp_disable(ioaddr);
        if (ret)
                goto re_enable;

        /* Set all entries as NOT in HW */
        for (i = 0; i < count; i++) {
                entry = &entries[i];
                entry->in_hw = false;
        }

        /* Update entries by reverse order */
        while (1) {
                entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
                if (!entry)
                        break;

                curr_prio = entry->prio;
                frag = entry->frag_ptr;

                /* Set special fragment requirements */
                if (frag) {
                        entry->val.af = 0;
                        entry->val.rf = 0;
                        entry->val.nc = 1;
                        entry->val.ok_index = nve + 2;
                }

                ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
                if (ret)
                        goto re_enable;

                entry->table_pos = nve++;
                entry->in_hw = true;

                if (frag && !frag->in_hw) {
                        ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
                        if (ret)
                                goto re_enable;
                        frag->table_pos = nve++;
                        frag->in_hw = true;
                }
        }

        if (!nve)
                goto re_enable;

        /* Update all pass entry */
        for (i = 0; i < count; i++) {
                entry = &entries[i];
                if (!entry->is_last)
                        continue;

                ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
                if (ret)
                        goto re_enable;

                entry->table_pos = nve++;
        }

        /* Assume n. of parsable entries == n. of valid entries */
        val = (nve << 16) & XGMAC_NPE;
        val |= nve & XGMAC_NVE;
        writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

        /* Enable RX Parser */
        dwxgmac3_rxp_enable(ioaddr);

re_enable:
        /* Re-enable RX */
        writel(old_val, ioaddr + XGMAC_RX_CONFIG);
        return ret;
}

static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
                                      value, value & XGMAC_TXTSC, 100, 10000))
                return -EBUSY;

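        /* Timestamp = sub-second nanoseconds + seconds scaled to nanoseconds */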
        *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
        *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
        return 0;
}

const struct stmmac_ops dwxgmac210_ops = {
        .core_init = dwxgmac2_core_init,
        .set_mac = dwxgmac2_set_mac,
        .rx_ipc = dwxgmac2_rx_ipc,
        .rx_queue_enable = dwxgmac2_rx_queue_enable,
        .rx_queue_prio = dwxgmac2_rx_queue_prio,
        .tx_queue_prio = dwxgmac2_tx_queue_prio,
        .rx_queue_routing = NULL,
        .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
        .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
        .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
        .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
        .config_cbs = dwxgmac2_config_cbs,
        .dump_regs = NULL,
        .host_irq_status = dwxgmac2_host_irq_status,
        .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
        .flow_ctrl = dwxgmac2_flow_ctrl,
        .pmt = dwxgmac2_pmt,
        .set_umac_addr = dwxgmac2_set_umac_addr,
        .get_umac_addr = dwxgmac2_get_umac_addr,
        .set_eee_mode = NULL,
        .reset_eee_mode = NULL,
        .set_eee_timer = NULL,
        .set_eee_pls = NULL,
        .pcs_ctrl_ane = NULL,
        .pcs_rane = NULL,
        .pcs_get_adv_lp = NULL,
        .debug = NULL,
        .set_filter = dwxgmac2_set_filter,
        .safety_feat_config = dwxgmac3_safety_feat_config,
        .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
        .safety_feat_dump = dwxgmac3_safety_feat_dump,
        .set_mac_loopback = dwxgmac2_set_mac_loopback,
        .rss_configure = dwxgmac2_rss_configure,
        .update_vlan_hash = dwxgmac2_update_vlan_hash,
        .rxp_config = dwxgmac3_rxp_config,
        .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
};

int dwxgmac2_setup(struct stmmac_priv *priv)
{
        struct mac_device_info *mac = priv->hw;

        dev_info(priv->device, "\tXGMAC2\n");

        priv->dev->priv_flags |= IFF_UNICAST_FLT;
        mac->pcsr = priv->ioaddr;
        mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
        mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
        mac->mcast_bits_log2 = 0;

        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

        mac->link.duplex = 0;
        mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
        mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
        mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
        mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
        mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
        mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
        mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
        mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

        mac->mii.addr = XGMAC_MDIO_ADDR;
        mac->mii.data = XGMAC_MDIO_DATA;
        mac->mii.addr_shift = 16;
        mac->mii.addr_mask = GENMASK(20, 16);
        mac->mii.reg_shift = 0;
        mac->mii.reg_mask = GENMASK(15, 0);
        mac->mii.clk_csr_shift = 19;
        mac->mii.clk_csr_mask = GENMASK(21, 19);

        return 0;
}