/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>
44 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
45 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
46 #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
47 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
48 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
49 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
50 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
51 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
52 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
53 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
54 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
55 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
56 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
57 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
58 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
59 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
60 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
61 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
62 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
63 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
64 #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
65 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
66 #define MVNETA_PORT_RX_RESET 0x1cc0
67 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
68 #define MVNETA_PHY_ADDR 0x2000
69 #define MVNETA_PHY_ADDR_MASK 0x1f
70 #define MVNETA_MBUS_RETRY 0x2010
71 #define MVNETA_UNIT_INTR_CAUSE 0x2080
72 #define MVNETA_UNIT_CONTROL 0x20B0
73 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
74 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
75 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
76 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
77 #define MVNETA_BASE_ADDR_ENABLE 0x2290
78 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
79 #define MVNETA_PORT_CONFIG 0x2400
80 #define MVNETA_UNI_PROMISC_MODE BIT(0)
81 #define MVNETA_DEF_RXQ(q) ((q) << 1)
82 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
83 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
84 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
85 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
86 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
87 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
88 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
89 MVNETA_DEF_RXQ_ARP(q) | \
90 MVNETA_DEF_RXQ_TCP(q) | \
91 MVNETA_DEF_RXQ_UDP(q) | \
92 MVNETA_DEF_RXQ_BPDU(q) | \
93 MVNETA_TX_UNSET_ERR_SUM | \
94 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
95 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
96 #define MVNETA_MAC_ADDR_LOW 0x2414
97 #define MVNETA_MAC_ADDR_HIGH 0x2418
98 #define MVNETA_SDMA_CONFIG 0x241c
99 #define MVNETA_SDMA_BRST_SIZE_16 4
100 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
101 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
102 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
103 #define MVNETA_DESC_SWAP BIT(6)
104 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
105 #define MVNETA_PORT_STATUS 0x2444
106 #define MVNETA_TX_IN_PRGRS BIT(1)
107 #define MVNETA_TX_FIFO_EMPTY BIT(8)
108 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
109 #define MVNETA_SERDES_CFG 0x24A0
110 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
111 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
112 #define MVNETA_TYPE_PRIO 0x24bc
113 #define MVNETA_FORCE_UNI BIT(21)
114 #define MVNETA_TXQ_CMD_1 0x24e4
115 #define MVNETA_TXQ_CMD 0x2448
116 #define MVNETA_TXQ_DISABLE_SHIFT 8
117 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
118 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
119 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488
120 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
121 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
122 #define MVNETA_ACC_MODE 0x2500
123 #define MVNETA_BM_ADDRESS 0x2504
124 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
125 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
126 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
127 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
128 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
129 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated to a queue is not
 * set, then a read of this register from this CPU will always return
 * 0 and a write won't do anything.
 */
139 #define MVNETA_INTR_NEW_CAUSE 0x25a0
140 #define MVNETA_INTR_NEW_MASK 0x25a4
/* bits 0..7  = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
149 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
150 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
151 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
152 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
153 #define MVNETA_MISCINTR_INTR_MASK BIT(31)
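/* Example (illustration): for a port using 4 TX queues and 4 RX queues,
 * MVNETA_TX_INTR_MASK(4) = 0x0000000f and MVNETA_RX_INTR_MASK(4) = 0x00000f00,
 * i.e. one "sent" bit per TXQ in bits 0..3 and one "occupied" bit per RXQ in
 * bits 8..11 of the cause/mask registers above.
 */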
155 #define MVNETA_INTR_OLD_CAUSE 0x25a8
156 #define MVNETA_INTR_OLD_MASK 0x25ac
158 /* Data Path Port/Queue Cause Register */
159 #define MVNETA_INTR_MISC_CAUSE 0x25b0
160 #define MVNETA_INTR_MISC_MASK 0x25b4
162 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
163 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
164 #define MVNETA_CAUSE_PTP BIT(4)
166 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
167 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
168 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
169 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
170 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
171 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
172 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
173 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
175 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
176 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
177 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
179 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
180 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
181 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
183 #define MVNETA_INTR_ENABLE 0x25b8
184 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
185 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
187 #define MVNETA_RXQ_CMD 0x2680
188 #define MVNETA_RXQ_DISABLE_SHIFT 8
189 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
190 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
191 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
192 #define MVNETA_GMAC_CTRL_0 0x2c00
193 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
194 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
195 #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
196 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
197 #define MVNETA_GMAC_CTRL_2 0x2c08
198 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
199 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
200 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
201 #define MVNETA_GMAC2_PORT_RESET BIT(6)
202 #define MVNETA_GMAC_STATUS 0x2c10
203 #define MVNETA_GMAC_LINK_UP BIT(0)
204 #define MVNETA_GMAC_SPEED_1000 BIT(1)
205 #define MVNETA_GMAC_SPEED_100 BIT(2)
206 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
207 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
208 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
209 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
210 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
211 #define MVNETA_GMAC_AN_COMPLETE BIT(11)
212 #define MVNETA_GMAC_SYNC_OK BIT(14)
213 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
214 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
215 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
216 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
217 #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
218 #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
219 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
220 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
221 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
222 #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
223 #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
224 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
225 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
226 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
227 #define MVNETA_GMAC_CTRL_4 0x2c90
228 #define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
229 #define MVNETA_MIB_COUNTERS_BASE 0x3000
230 #define MVNETA_MIB_LATE_COLLISION 0x7c
231 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
232 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
233 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
234 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
235 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
236 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
237 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
238 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
239 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
240 #define MVNETA_TXQ_DEC_SENT_MASK 0xff
241 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
242 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
243 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
244 #define MVNETA_PORT_TX_RESET 0x3cf0
245 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
246 #define MVNETA_TX_MTU 0x3e0c
247 #define MVNETA_TX_TOKEN_SIZE 0x3e14
248 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
249 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
250 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
252 #define MVNETA_LPI_CTRL_0 0x2cc0
253 #define MVNETA_LPI_CTRL_1 0x2cc4
254 #define MVNETA_LPI_REQUEST_ENABLE BIT(0)
255 #define MVNETA_LPI_CTRL_2 0x2cc8
256 #define MVNETA_LPI_STATUS 0x2ccc
258 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
260 /* Descriptor ring Macros */
261 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
262 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
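/* Example (illustration): with a 512-descriptor ring (last_desc = 511),
 * MVNETA_QUEUE_NEXT_DESC(q, 510) evaluates to 511 and
 * MVNETA_QUEUE_NEXT_DESC(q, 511) wraps back to 0.
 */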
264 /* Various constants */
267 #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
268 #define MVNETA_RX_COAL_PKTS 32
269 #define MVNETA_RX_COAL_USEC 100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4 byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2
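/* Illustration: a 14-byte Ethernet header preceded by the 2-byte Marvell
 * header gives 16 bytes before the IP header, so when the buffer start is
 * 4-byte aligned the IP header automatically is as well.
 */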
281 #define MVNETA_VLAN_TAG_LEN 4
283 #define MVNETA_TX_CSUM_DEF_SIZE 1600
284 #define MVNETA_TX_CSUM_MAX_SIZE 9800
285 #define MVNETA_ACC_MODE_EXT1 1
286 #define MVNETA_ACC_MODE_EXT2 2
288 #define MVNETA_MAX_DECODE_WIN 6
290 /* Timeout constants */
291 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
292 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
293 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
295 #define MVNETA_TX_MTU_MAX 0x3ffff
/* The RSS lookup table actually has 256 entries but we do not use
 * all of them
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1
302 /* Max number of Rx descriptors */
303 #define MVNETA_MAX_RXD 512
305 /* Max number of Tx descriptors */
306 #define MVNETA_MAX_TXD 1024
308 /* Max number of allowed TCP segments for software TSO */
309 #define MVNETA_MAX_TSO_SEGS 100
311 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
313 /* descriptor aligned size */
314 #define MVNETA_DESC_ALIGNED_SIZE 32
/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION	64
#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      cache_line_size())
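/* Example (illustration, assuming a 64-byte cache line): for an MTU of 1500
 * the receive packet size is 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) +
 * 4 (FCS) = 1524 bytes, rounded up to 1536 by the ALIGN() above.
 */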
327 #define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
329 #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
330 MVNETA_SKB_HEADROOM))
331 #define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
332 #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
334 #define IS_TSO_HEADER(txq, addr) \
335 ((addr >= txq->tso_hdrs_phys) && \
336 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
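/* IS_TSO_HEADER() checks whether a descriptor's DMA address points into the
 * queue's dedicated TSO header buffer rather than into a mapped fragment;
 * the TX completion path below uses it to skip dma_unmap_single() for such
 * descriptors.
 */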
338 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
339 (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
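/* Example (illustration): with MVNETA_RXD_BM_POOL_MASK covering bits 13..14,
 * a descriptor status of 0x00004000 decodes to BM pool id 2.
 */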
enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
#define T_SW		1
358 #define MVNETA_XDP_PASS BIT(0)
359 #define MVNETA_XDP_DROPPED BIT(1)
360 #define MVNETA_XDP_TX BIT(2)
361 #define MVNETA_XDP_REDIR BIT(3)
363 static const struct mvneta_statistic mvneta_statistics[] = {
364 { 0x3000, T_REG_64, "good_octets_received", },
365 { 0x3010, T_REG_32, "good_frames_received", },
366 { 0x3008, T_REG_32, "bad_octets_received", },
367 { 0x3014, T_REG_32, "bad_frames_received", },
368 { 0x3018, T_REG_32, "broadcast_frames_received", },
369 { 0x301c, T_REG_32, "multicast_frames_received", },
370 { 0x3050, T_REG_32, "unrec_mac_control_received", },
371 { 0x3058, T_REG_32, "good_fc_received", },
372 { 0x305c, T_REG_32, "bad_fc_received", },
373 { 0x3060, T_REG_32, "undersize_received", },
374 { 0x3064, T_REG_32, "fragments_received", },
375 { 0x3068, T_REG_32, "oversize_received", },
376 { 0x306c, T_REG_32, "jabber_received", },
377 { 0x3070, T_REG_32, "mac_receive_error", },
378 { 0x3074, T_REG_32, "bad_crc_event", },
379 { 0x3078, T_REG_32, "collision", },
380 { 0x307c, T_REG_32, "late_collision", },
381 { 0x2484, T_REG_32, "rx_discard", },
382 { 0x2488, T_REG_32, "rx_overrun", },
383 { 0x3020, T_REG_32, "frames_64_octets", },
384 { 0x3024, T_REG_32, "frames_65_to_127_octets", },
385 { 0x3028, T_REG_32, "frames_128_to_255_octets", },
386 { 0x302c, T_REG_32, "frames_256_to_511_octets", },
387 { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
388 { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
389 { 0x3038, T_REG_64, "good_octets_sent", },
390 { 0x3040, T_REG_32, "good_frames_sent", },
391 { 0x3044, T_REG_32, "excessive_collision", },
392 { 0x3048, T_REG_32, "multicast_frames_sent", },
393 { 0x304c, T_REG_32, "broadcast_frames_sent", },
394 { 0x3054, T_REG_32, "fc_sent", },
395 { 0x300c, T_REG_32, "internal_mac_transmit_err", },
396 { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
397 { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
};
struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};
struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
423 struct mvneta_pcpu_stats __percpu *stats;
427 struct mvneta_rx_queue *rxqs;
428 struct mvneta_tx_queue *txqs;
429 struct net_device *dev;
430 struct hlist_node node_online;
431 struct hlist_node node_dead;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;
440 struct napi_struct napi;
442 struct bpf_prog *xdp_prog;
452 phy_interface_t phy_interface;
453 struct device_node *dn;
454 unsigned int tx_csum_limit;
455 struct phylink *phylink;
456 struct phylink_config phylink_config;
459 struct mvneta_bm *bm_priv;
460 struct mvneta_bm_pool *pool_long;
461 struct mvneta_bm_pool *pool_short;
468 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
470 u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
472 /* Flags for special SoC configurations */
473 bool neta_armada3700;
474 u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */
483 #define MVNETA_TX_L3_OFF_SHIFT 0
484 #define MVNETA_TX_IP_HLEN_SHIFT 8
485 #define MVNETA_TX_L4_UDP BIT(16)
486 #define MVNETA_TX_L3_IP6 BIT(17)
487 #define MVNETA_TXD_IP_CSUM BIT(18)
488 #define MVNETA_TXD_Z_PAD BIT(19)
489 #define MVNETA_TXD_L_DESC BIT(20)
490 #define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD  | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
495 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
497 #define MVNETA_RXD_ERR_CRC 0x0
498 #define MVNETA_RXD_BM_POOL_SHIFT 13
499 #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
500 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
501 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
502 #define MVNETA_RXD_ERR_LEN BIT(18)
503 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
504 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
505 #define MVNETA_RXD_L3_IP4 BIT(25)
506 #define MVNETA_RXD_LAST_DESC BIT(26)
507 #define MVNETA_RXD_FIRST_DESC BIT(27)
508 #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
509 MVNETA_RXD_LAST_DESC)
510 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
512 #if defined(__LITTLE_ENDIAN)
513 struct mvneta_tx_desc {
514 u32 command; /* Options used by HW for packet transmitting.*/
515 u16 reserved1; /* csum_l4 (for future use) */
516 u16 data_size; /* Data size of transmitted packet in bytes */
517 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
518 u32 reserved2; /* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};
522 struct mvneta_rx_desc {
523 u32 status; /* Info about received packet */
524 u16 reserved1; /* pnc_info - (for future use, PnC) */
525 u16 data_size; /* Size of received packet in bytes */
527 u32 buf_phys_addr; /* Physical address of the buffer */
528 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
530 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
531 u16 reserved3; /* prefetch_cmd, for future use */
532 u16 reserved4; /* csum_l4 - (for future use, PnC) */
534 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
538 struct mvneta_tx_desc {
539 u16 data_size; /* Data size of transmitted packet in bytes */
540 u16 reserved1; /* csum_l4 (for future use) */
541 u32 command; /* Options used by HW for packet transmitting.*/
542 u32 reserved2; /* hw_cmd - (for future use, PMT) */
543 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
	u32  reserved3[4];	/* Reserved - (for future use) */
};
547 struct mvneta_rx_desc {
548 u16 data_size; /* Size of received packet in bytes */
549 u16 reserved1; /* pnc_info - (for future use, PnC) */
550 u32 status; /* Info about received packet */
552 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
553 u32 buf_phys_addr; /* Physical address of the buffer */
555 u16 reserved4; /* csum_l4 - (for future use, PnC) */
556 u16 reserved3; /* prefetch_cmd, for future use */
557 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
559 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif
enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};
578 struct mvneta_tx_queue {
579 /* Number of this TX queue, in the range 0-7 */
582 /* Number of TX DMA descriptors in the descriptor ring */
585 /* Number of currently used TX DMA descriptor in the
590 int tx_stop_threshold;
591 int tx_wake_threshold;
593 /* Array of transmitted buffers */
594 struct mvneta_tx_buf *buf;
596 /* Index of last TX DMA descriptor that was inserted */
599 /* Index of the TX DMA descriptor to be cleaned up */
604 /* Virtual address of the TX DMA descriptors array */
605 struct mvneta_tx_desc *descs;
607 /* DMA address of the TX DMA descriptors array */
608 dma_addr_t descs_phys;
610 /* Index of the last TX DMA descriptor */
613 /* Index of the next TX DMA descriptor to process */
614 int next_desc_to_proc;
616 /* DMA buffers for TSO headers */
619 /* DMA address of TSO headers */
620 dma_addr_t tso_hdrs_phys;
622 /* Affinity mask for CPUs*/
	cpumask_t affinity_mask;
};
626 struct mvneta_rx_queue {
627 /* rx queue number, in the range 0-7 */
630 /* num of rx descriptors in the rx descriptor ring */
637 struct page_pool *page_pool;
638 struct xdp_rxq_info xdp_rxq;
640 /* Virtual address of the RX buffer */
641 void **buf_virt_addr;
643 /* Virtual address of the RX DMA descriptors array */
644 struct mvneta_rx_desc *descs;
646 /* DMA address of the RX DMA descriptors array */
647 dma_addr_t descs_phys;
649 /* Index of the last RX DMA descriptor */
652 /* Index of the next RX DMA descriptor to process */
653 int next_desc_to_proc;
655 /* Index of first RX DMA descriptor to refill */
	/* pointer to incomplete skb buffer */
	struct sk_buff *skb;
	int left_size;

	/* error counters */
	u32 skb_alloc_err;
	u32 refill_err;
};
static enum cpuhp_state online_hpstate;

/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rx_copybreak __read_mostly = 256;

/* HW BM needs that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
729 /* Get System Network Statistics */
731 mvneta_get_stats64(struct net_device *dev,
732 struct rtnl_link_stats64 *stats)
734 struct mvneta_port *pp = netdev_priv(dev);
738 for_each_possible_cpu(cpu) {
739 struct mvneta_pcpu_stats *cpu_stats;
745 cpu_stats = per_cpu_ptr(pp->stats, cpu);
747 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
748 rx_packets = cpu_stats->rx_packets;
749 rx_bytes = cpu_stats->rx_bytes;
750 tx_packets = cpu_stats->tx_packets;
751 tx_bytes = cpu_stats->tx_bytes;
752 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
754 stats->rx_packets += rx_packets;
755 stats->rx_bytes += rx_bytes;
756 stats->tx_packets += tx_packets;
757 stats->tx_bytes += tx_bytes;
760 stats->rx_errors = dev->stats.rx_errors;
761 stats->rx_dropped = dev->stats.rx_dropped;
763 stats->tx_dropped = dev->stats.tx_dropped;
766 /* Rx descriptors helper methods */
768 /* Checks whether the RX descriptor having this status is both the first
769 * and the last descriptor for the RX packet. Each RX packet is currently
770 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
773 static int mvneta_rxq_desc_is_first_last(u32 status)
775 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
776 MVNETA_RXD_FIRST_LAST_DESC;
779 /* Add number of descriptors ready to receive new packets */
780 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
781 struct mvneta_rx_queue *rxq,
784 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
787 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
788 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
789 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
790 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
791 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
794 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
795 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
798 /* Get number of RX descriptors occupied by received packets */
799 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
800 struct mvneta_rx_queue *rxq)
804 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
805 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
808 /* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
811 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
812 struct mvneta_rx_queue *rxq,
813 int rx_done, int rx_filled)
817 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
819 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
820 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
824 /* Only 255 descriptors can be added at once */
825 while ((rx_done > 0) || (rx_filled > 0)) {
826 if (rx_done <= 0xff) {
833 if (rx_filled <= 0xff) {
834 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
837 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
840 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
844 /* Get pointer to next RX descriptor to be processed by SW */
845 static struct mvneta_rx_desc *
846 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
848 int rx_desc = rxq->next_desc_to_proc;
850 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
851 prefetch(rxq->descs + rxq->next_desc_to_proc);
852 return rxq->descs + rx_desc;
855 /* Change maximum receive size of the port. */
856 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
860 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
861 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
862 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
863 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
864 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
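	/* Example (illustration): for max_rx_size = 1518 the value written
	 * into the field above is (1518 - 2) / 2 = 758, which suggests the
	 * GMAC limit is held in 2-byte units after subtracting the Marvell
	 * header.
	 */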
868 /* Set rx queue offset */
869 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
870 struct mvneta_rx_queue *rxq,
875 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
876 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
879 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
880 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
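	/* Example (illustration): an offset of 64 bytes is written above as
	 * 64 >> 3 = 8, i.e. the packet offset field is expressed in 8-byte
	 * units.
	 */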
884 /* Tx descriptors helper methods */
886 /* Update HW with number of TX descriptors to be sent */
887 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
888 struct mvneta_tx_queue *txq,
893 pend_desc += txq->pending;
895 /* Only 255 Tx descriptors can be added at once */
897 val = min(pend_desc, 255);
898 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
900 } while (pend_desc > 0);
904 /* Get pointer to next TX descriptor to be processed (send) by HW */
905 static struct mvneta_tx_desc *
906 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
908 int tx_desc = txq->next_desc_to_proc;
910 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
911 return txq->descs + tx_desc;
914 /* Release the last allocated TX descriptor. Useful to handle DMA
915 * mapping failures in the TX path.
917 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
919 if (txq->next_desc_to_proc == 0)
920 txq->next_desc_to_proc = txq->last_desc - 1;
922 txq->next_desc_to_proc--;
925 /* Set rxq buf size */
926 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
927 struct mvneta_rx_queue *rxq,
932 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
934 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
935 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
937 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
940 /* Disable buffer management (BM) */
941 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
942 struct mvneta_rx_queue *rxq)
946 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
947 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
948 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
951 /* Enable buffer management (BM) */
952 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
953 struct mvneta_rx_queue *rxq)
957 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
958 val |= MVNETA_RXQ_HW_BUF_ALLOC;
959 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
962 /* Notify HW about port's assignment of pool for bigger packets */
963 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
964 struct mvneta_rx_queue *rxq)
968 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
969 val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
970 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
972 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
975 /* Notify HW about port's assignment of pool for smaller packets */
976 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
977 struct mvneta_rx_queue *rxq)
981 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
982 val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
983 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
985 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
988 /* Set port's receive buffer size for assigned BM pool */
989 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
995 if (!IS_ALIGNED(buf_size, 8)) {
996 dev_warn(pp->dev->dev.parent,
997 "illegal buf_size value %d, round to %d\n",
998 buf_size, ALIGN(buf_size, 8));
999 buf_size = ALIGN(buf_size, 8);
1002 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1003 val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
1004 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1007 /* Configure MBUS window in order to enable access BM internal SRAM */
1008 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1011 u32 win_enable, win_protect;
1014 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1016 if (pp->bm_win_id < 0) {
1017 /* Find first not occupied window */
1018 for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
1019 if (win_enable & (1 << i)) {
1024 if (i == MVNETA_MAX_DECODE_WIN)
1030 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1031 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1034 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1036 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1037 (attr << 8) | target);
1039 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1041 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1042 win_protect |= 3 << (2 * i);
1043 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1045 win_enable &= ~(1 << i);
1046 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1051 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1057 /* Get BM window information */
1058 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1065 /* Open NETA -> BM window */
1066 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1069 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1075 /* Assign and initialize pools for port. In case of fail
1076 * buffer manager will remain disabled for current port.
1078 static int mvneta_bm_port_init(struct platform_device *pdev,
1079 struct mvneta_port *pp)
1081 struct device_node *dn = pdev->dev.of_node;
1082 u32 long_pool_id, short_pool_id;
1084 if (!pp->neta_armada3700) {
1087 ret = mvneta_bm_port_mbus_init(pp);
1092 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1093 netdev_info(pp->dev, "missing long pool id\n");
1097 /* Create port's long pool depending on mtu */
1098 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1099 MVNETA_BM_LONG, pp->id,
1100 MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1101 if (!pp->pool_long) {
1102 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1106 pp->pool_long->port_map |= 1 << pp->id;
1108 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1111 /* If short pool id is not defined, assume using single pool */
1112 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1113 short_pool_id = long_pool_id;
1115 /* Create port's short pool */
1116 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1117 MVNETA_BM_SHORT, pp->id,
1118 MVNETA_BM_SHORT_PKT_SIZE);
1119 if (!pp->pool_short) {
1120 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1121 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1125 if (short_pool_id != long_pool_id) {
1126 pp->pool_short->port_map |= 1 << pp->id;
1127 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1128 pp->pool_short->id);
1134 /* Update settings of a pool for bigger packets */
1135 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1137 struct mvneta_bm_pool *bm_pool = pp->pool_long;
1138 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1141 /* Release all buffers from long pool */
1142 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1143 if (hwbm_pool->buf_num) {
1144 WARN(1, "cannot free all buffers in pool %d\n",
1149 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1150 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1151 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1152 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1154 /* Fill entire long pool */
1155 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1156 if (num != hwbm_pool->size) {
1157 WARN(1, "pool %d: %d of %d allocated\n",
1158 bm_pool->id, num, hwbm_pool->size);
1161 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1166 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1167 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1170 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1171 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1174 /* Start the Ethernet port RX and TX activity */
1175 static void mvneta_port_up(struct mvneta_port *pp)
1180 /* Enable all initialized TXs. */
1182 for (queue = 0; queue < txq_number; queue++) {
1183 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1185 q_map |= (1 << queue);
1187 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1190 /* Enable all initialized RXQs. */
1191 for (queue = 0; queue < rxq_number; queue++) {
1192 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1195 q_map |= (1 << queue);
1197 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1200 /* Stop the Ethernet port activity */
1201 static void mvneta_port_down(struct mvneta_port *pp)
1206 /* Stop Rx port activity. Check port Rx activity. */
1207 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1209 /* Issue stop command for active channels only */
1211 mvreg_write(pp, MVNETA_RXQ_CMD,
1212 val << MVNETA_RXQ_DISABLE_SHIFT);
1214 /* Wait for all Rx activity to terminate. */
1217 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1218 netdev_warn(pp->dev,
1219 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1225 val = mvreg_read(pp, MVNETA_RXQ_CMD);
1226 } while (val & MVNETA_RXQ_ENABLE_MASK);
1228 /* Stop Tx port activity. Check port Tx activity. Issue stop
1229 * command for active channels only
1231 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1234 mvreg_write(pp, MVNETA_TXQ_CMD,
1235 (val << MVNETA_TXQ_DISABLE_SHIFT));
1237 /* Wait for all Tx activity to terminate. */
1240 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1241 netdev_warn(pp->dev,
1242 "TIMEOUT for TX stopped status=0x%08x\n",
1248 /* Check TX Command reg that all Txqs are stopped */
1249 val = mvreg_read(pp, MVNETA_TXQ_CMD);
1251 } while (val & MVNETA_TXQ_ENABLE_MASK);
1253 /* Double check to verify that TX FIFO is empty */
1256 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1257 netdev_warn(pp->dev,
1258 "TX FIFO empty timeout status=0x%08x\n",
1264 val = mvreg_read(pp, MVNETA_PORT_STATUS);
1265 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1266 (val & MVNETA_TX_IN_PRGRS));
1271 /* Enable the port by setting the port enable bit of the MAC control register */
1272 static void mvneta_port_enable(struct mvneta_port *pp)
1277 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1278 val |= MVNETA_GMAC0_PORT_ENABLE;
1279 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1282 /* Disable the port and wait for about 200 usec before retuning */
1283 static void mvneta_port_disable(struct mvneta_port *pp)
1287 /* Reset the Enable bit in the Serial Control Register */
1288 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1289 val &= ~MVNETA_GMAC0_PORT_ENABLE;
1290 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1295 /* Multicast tables methods */
1297 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1298 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1306 val = 0x1 | (queue << 1);
1307 val |= (val << 24) | (val << 16) | (val << 8);
1310 for (offset = 0; offset <= 0xc; offset += 4)
1311 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1314 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1315 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1323 val = 0x1 | (queue << 1);
1324 val |= (val << 24) | (val << 16) | (val << 8);
1327 for (offset = 0; offset <= 0xfc; offset += 4)
1328 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1332 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1333 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1339 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1342 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1343 val = 0x1 | (queue << 1);
1344 val |= (val << 24) | (val << 16) | (val << 8);
1347 for (offset = 0; offset <= 0xfc; offset += 4)
1348 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1351 static void mvneta_percpu_unmask_interrupt(void *arg)
1353 struct mvneta_port *pp = arg;
	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
1358 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1359 MVNETA_RX_INTR_MASK_ALL |
1360 MVNETA_TX_INTR_MASK_ALL |
1361 MVNETA_MISCINTR_INTR_MASK);
1364 static void mvneta_percpu_mask_interrupt(void *arg)
1366 struct mvneta_port *pp = arg;
	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
1371 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1372 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1373 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1376 static void mvneta_percpu_clear_intr_cause(void *arg)
1378 struct mvneta_port *pp = arg;
	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
1383 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1384 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1385 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1388 /* This method sets defaults to the NETA port:
1389 * Clears interrupt Cause and Mask registers.
1390 * Clears all MAC tables.
1391 * Sets defaults to all registers.
1392 * Resets RX and TX descriptor rings.
1394 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
1397 static void mvneta_defaults_set(struct mvneta_port *pp)
1402 int max_cpu = num_present_cpus();
1404 /* Clear all Cause registers */
1405 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1407 /* Mask all interrupts */
1408 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1409 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1411 /* Enable MBUS Retry bit16 */
1412 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1414 /* Set CPU queue access map. CPUs are assigned to the RX and
1415 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
1419 for_each_present_cpu(cpu) {
1420 int rxq_map = 0, txq_map = 0;
1422 if (!pp->neta_armada3700) {
1423 for (rxq = 0; rxq < rxq_number; rxq++)
1424 if ((rxq % max_cpu) == cpu)
1425 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1427 for (txq = 0; txq < txq_number; txq++)
1428 if ((txq % max_cpu) == cpu)
1429 txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1431 /* With only one TX queue we configure a special case
1432 * which will allow to get all the irq on a single
1435 if (txq_number == 1)
1436 txq_map = (cpu == pp->rxq_def) ?
1437 MVNETA_CPU_TXQ_ACCESS(1) : 0;
1440 txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1441 rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1444 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
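		/* Example (illustration): with two present CPUs and eight RX
		 * queues, the modulo assignment above gives CPU0 access to
		 * RXQs 0, 2, 4, 6 (rxq_map = 0x55) and CPU1 access to
		 * RXQs 1, 3, 5, 7 (rxq_map = 0xaa).
		 */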
1447 /* Reset RX and TX DMAs */
1448 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1449 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1451 /* Disable Legacy WRR, Disable EJP, Release from reset */
1452 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1453 for (queue = 0; queue < txq_number; queue++) {
1454 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1455 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1458 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1459 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1461 /* Set Port Acceleration Mode */
1463 /* HW buffer management + legacy parser */
1464 val = MVNETA_ACC_MODE_EXT2;
1466 /* SW buffer management + legacy parser */
1467 val = MVNETA_ACC_MODE_EXT1;
1468 mvreg_write(pp, MVNETA_ACC_MODE, val);
1471 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1473 /* Update val of portCfg register accordingly with all RxQueue types */
1474 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1475 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1478 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1479 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1481 /* Build PORT_SDMA_CONFIG_REG */
1484 /* Default burst size */
1485 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1486 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1487 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1489 #if defined(__BIG_ENDIAN)
1490 val |= MVNETA_DESC_SWAP;
1493 /* Assign port SDMA configuration */
1494 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1496 /* Disable PHY polling in hardware, since we're using the
1497 * kernel phylib to do this.
1499 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1500 val &= ~MVNETA_PHY_POLLING_ENABLE;
1501 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1503 mvneta_set_ucast_table(pp, -1);
1504 mvneta_set_special_mcast_table(pp, -1);
1505 mvneta_set_other_mcast_table(pp, -1);
1507 /* Set port interrupt enable register - default enable all */
1508 mvreg_write(pp, MVNETA_INTR_ENABLE,
1509 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1510 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1512 mvneta_mib_counters_clear(pp);
1515 /* Set max sizes for tx queues */
1516 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1522 mtu = max_tx_size * 8;
1523 if (mtu > MVNETA_TX_MTU_MAX)
1524 mtu = MVNETA_TX_MTU_MAX;
1527 val = mvreg_read(pp, MVNETA_TX_MTU);
1528 val &= ~MVNETA_TX_MTU_MAX;
1530 mvreg_write(pp, MVNETA_TX_MTU, val);
1532 /* TX token size and all TXQs token size must be larger that MTU */
1533 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1535 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1538 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1540 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1542 for (queue = 0; queue < txq_number; queue++) {
1543 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1545 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1548 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1550 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1555 /* Set unicast address */
1556 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1559 unsigned int unicast_reg;
1560 unsigned int tbl_offset;
1561 unsigned int reg_offset;
1563 /* Locate the Unicast table entry */
1564 last_nibble = (0xf & last_nibble);
1566 /* offset from unicast tbl base */
1567 tbl_offset = (last_nibble / 4) * 4;
1569 /* offset within the above reg */
1570 reg_offset = last_nibble % 4;
1572 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1575 /* Clear accepts frame bit at specified unicast DA tbl entry */
1576 unicast_reg &= ~(0xff << (8 * reg_offset));
1578 unicast_reg &= ~(0xff << (8 * reg_offset));
1579 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1582 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
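	/* Example (illustration): for a MAC address ending in 0x4b,
	 * last_nibble is 0xb, so tbl_offset = (11 / 4) * 4 = 8 and
	 * reg_offset = 3: the entry lives in byte 3 of the register at
	 * MVNETA_DA_FILT_UCAST_BASE + 8.
	 */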
1585 /* Set mac address */
1586 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1593 mac_l = (addr[4] << 8) | (addr[5]);
1594 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1595 (addr[2] << 8) | (addr[3] << 0);
1597 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1598 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1601 /* Accept frames of this address */
1602 mvneta_set_ucast_addr(pp, addr[5], queue);
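	/* Example (illustration): for the MAC address 00:50:43:12:34:56 the
	 * code above writes mac_h = 0x00504312 and mac_l = 0x00003456.
	 */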
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
1608 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1609 struct mvneta_rx_queue *rxq, u32 value)
1611 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1612 value | MVNETA_RXQ_NON_OCCUPIED(0));
/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
1618 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1619 struct mvneta_rx_queue *rxq, u32 value)
1622 unsigned long clk_rate;
1624 clk_rate = clk_get_rate(pp->clk);
1625 val = (clk_rate / 1000000) * value;
1627 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
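	/* Example (illustration, assuming a 250 MHz core clock): a requested
	 * delay of 100 usec is converted above into
	 * (250000000 / 1000000) * 100 = 25000 clock cycles.
	 */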
1630 /* Set threshold for TX_DONE pkts coalescing */
1631 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1632 struct mvneta_tx_queue *txq, u32 value)
1636 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1638 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1639 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1641 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1644 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1645 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1646 u32 phys_addr, void *virt_addr,
1647 struct mvneta_rx_queue *rxq)
1651 rx_desc->buf_phys_addr = phys_addr;
1652 i = rx_desc - rxq->descs;
1653 rxq->buf_virt_addr[i] = virt_addr;
1656 /* Decrement sent descriptors counter */
1657 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1658 struct mvneta_tx_queue *txq,
1663 /* Only 255 TX descriptors can be updated at once */
1664 while (sent_desc > 0xff) {
1665 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1666 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1667 sent_desc = sent_desc - 0xff;
1670 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1671 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1674 /* Get number of TX descriptors already sent by HW */
1675 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1676 struct mvneta_tx_queue *txq)
1681 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1682 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1683 MVNETA_TXQ_SENT_DESC_SHIFT;
1688 /* Get number of sent descriptors and decrement counter.
1689 * The number of sent descriptors is returned.
1691 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1692 struct mvneta_tx_queue *txq)
1696 /* Get number of sent descriptors */
1697 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1699 /* Decrement sent descriptors counter */
1701 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
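/* Example (illustration): for an IPv4/TCP frame with the L3 header at offset
 * 14 and a 20-byte IP header (ihl = 5), the command word built above is
 * 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL = 0x4004050e.
 */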
1735 /* Display more error info */
1736 static void mvneta_rx_error(struct mvneta_port *pp,
1737 struct mvneta_rx_desc *rx_desc)
1739 u32 status = rx_desc->status;
1741 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1742 case MVNETA_RXD_ERR_CRC:
1743 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1744 status, rx_desc->data_size);
1746 case MVNETA_RXD_ERR_OVERRUN:
1747 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1748 status, rx_desc->data_size);
1750 case MVNETA_RXD_ERR_LEN:
1751 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1752 status, rx_desc->data_size);
1754 case MVNETA_RXD_ERR_RESOURCE:
1755 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1756 status, rx_desc->data_size);
1761 /* Handle RX checksum offload based on the descriptor's status */
1762 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1763 struct sk_buff *skb)
1765 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1766 (status & MVNETA_RXD_L3_IP4) &&
1767 (status & MVNETA_RXD_L4_CSUM_OK)) {
1769 skb->ip_summed = CHECKSUM_UNNECESSARY;
1773 skb->ip_summed = CHECKSUM_NONE;
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
1780 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1783 int queue = fls(cause) - 1;
1785 return &pp->txqs[queue];
1788 /* Free tx queue skbuffs */
1789 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1790 struct mvneta_tx_queue *txq, int num,
1791 struct netdev_queue *nq)
1793 unsigned int bytes_compl = 0, pkts_compl = 0;
1796 for (i = 0; i < num; i++) {
1797 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1798 struct mvneta_tx_desc *tx_desc = txq->descs +
1801 mvneta_txq_inc_get(txq);
1803 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
1804 buf->type != MVNETA_TYPE_XDP_TX)
1805 dma_unmap_single(pp->dev->dev.parent,
1806 tx_desc->buf_phys_addr,
1807 tx_desc->data_size, DMA_TO_DEVICE);
1808 if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
1809 bytes_compl += buf->skb->len;
1811 dev_kfree_skb_any(buf->skb);
1812 } else if (buf->type == MVNETA_TYPE_XDP_TX ||
1813 buf->type == MVNETA_TYPE_XDP_NDO) {
1814 xdp_return_frame(buf->xdpf);
1818 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1821 /* Handle end of transmission */
1822 static void mvneta_txq_done(struct mvneta_port *pp,
1823 struct mvneta_tx_queue *txq)
1825 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1828 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1832 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
1834 txq->count -= tx_done;
1836 if (netif_tx_queue_stopped(nq)) {
1837 if (txq->count <= txq->tx_wake_threshold)
1838 netif_tx_wake_queue(nq);
1842 /* Refill processing for SW buffer management */
1843 /* Allocate page per descriptor */
1844 static int mvneta_rx_refill(struct mvneta_port *pp,
1845 struct mvneta_rx_desc *rx_desc,
1846 struct mvneta_rx_queue *rxq,
1849 dma_addr_t phys_addr;
1852 page = page_pool_alloc_pages(rxq->page_pool,
1853 gfp_mask | __GFP_NOWARN);
1857 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1858 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1863 /* Handle tx checksum */
1864 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1866 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1868 __be16 l3_proto = vlan_get_protocol(skb);
1871 if (l3_proto == htons(ETH_P_IP)) {
1872 struct iphdr *ip4h = ip_hdr(skb);
1874 /* Calculate IPv4 checksum and L4 checksum */
1875 ip_hdr_len = ip4h->ihl;
1876 l4_proto = ip4h->protocol;
1877 } else if (l3_proto == htons(ETH_P_IPV6)) {
1878 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1880 /* Read l4_protocol from one of IPv6 extra headers */
1881 if (skb_network_header_len(skb) > 0)
1882 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1883 l4_proto = ip6h->nexthdr;
1885 return MVNETA_TX_L4_CSUM_NOT;
1887 return mvneta_txq_desc_csum(skb_network_offset(skb),
1888 l3_proto, ip_hdr_len, l4_proto);
1891 return MVNETA_TX_L4_CSUM_NOT;
1894 /* Drop packets received by the RXQ and free buffers */
1895 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1896 struct mvneta_rx_queue *rxq)
1900 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1902 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1905 for (i = 0; i < rx_done; i++) {
1906 struct mvneta_rx_desc *rx_desc =
1907 mvneta_rxq_next_desc_get(rxq);
1908 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1909 struct mvneta_bm_pool *bm_pool;
1911 bm_pool = &pp->bm_priv->bm_pools[pool_id];
1912 /* Return dropped buffer to the pool */
1913 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1914 rx_desc->buf_phys_addr);
1919 for (i = 0; i < rxq->size; i++) {
1920 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1921 void *data = rxq->buf_virt_addr[i];
1922 if (!data || !(rx_desc->buf_phys_addr))
1925 page_pool_put_page(rxq->page_pool, data, false);
1927 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
1928 xdp_rxq_info_unreg(&rxq->xdp_rxq);
1929 page_pool_destroy(rxq->page_pool);
1930 rxq->page_pool = NULL;
1934 mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
1937 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1939 u64_stats_update_begin(&stats->syncp);
1941 stats->tx_packets += pkts;
1942 stats->tx_bytes += len;
1944 stats->rx_packets += pkts;
1945 stats->rx_bytes += len;
1947 u64_stats_update_end(&stats->syncp);
1951 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
1953 struct mvneta_rx_desc *rx_desc;
1954 int curr_desc = rxq->first_to_refill;
1957 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
1958 rx_desc = rxq->descs + curr_desc;
1959 if (!(rx_desc->buf_phys_addr)) {
1960 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
1961 pr_err("Can't refill queue %d. Done %d from %d\n",
1962 rxq->id, i, rxq->refill_num);
1967 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
1969 rxq->refill_num -= i;
1970 rxq->first_to_refill = curr_desc;
1976 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
1977 struct xdp_frame *xdpf, bool dma_map)
1979 struct mvneta_tx_desc *tx_desc;
1980 struct mvneta_tx_buf *buf;
1981 dma_addr_t dma_addr;
1983 if (txq->count >= txq->tx_stop_threshold)
1984 return MVNETA_XDP_DROPPED;
1986 tx_desc = mvneta_txq_next_desc_get(txq);
1988 buf = &txq->buf[txq->txq_put_index];
1991 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
1992 xdpf->len, DMA_TO_DEVICE);
1993 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
1994 mvneta_txq_desc_put(txq);
1995 return MVNETA_XDP_DROPPED;
1997 buf->type = MVNETA_TYPE_XDP_NDO;
1999 struct page *page = virt_to_page(xdpf->data);
2001 dma_addr = page_pool_get_dma_addr(page) +
2002 sizeof(*xdpf) + xdpf->headroom;
2003 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
2004 xdpf->len, DMA_BIDIRECTIONAL);
2005 buf->type = MVNETA_TYPE_XDP_TX;
2009 tx_desc->command = MVNETA_TXD_FLZ_DESC;
2010 tx_desc->buf_phys_addr = dma_addr;
2011 tx_desc->data_size = xdpf->len;
2013 mvneta_update_stats(pp, 1, xdpf->len, true);
2014 mvneta_txq_inc_put(txq);
2018 return MVNETA_XDP_TX;
2022 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2024 struct mvneta_tx_queue *txq;
2025 struct netdev_queue *nq;
2026 struct xdp_frame *xdpf;
2030 xdpf = convert_to_xdp_frame(xdp);
2031 if (unlikely(!xdpf))
2032 return MVNETA_XDP_DROPPED;
2034 cpu = smp_processor_id();
2035 txq = &pp->txqs[cpu % txq_number];
2036 nq = netdev_get_tx_queue(pp->dev, txq->id);
2038 __netif_tx_lock(nq, cpu);
2039 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
2040 if (ret == MVNETA_XDP_TX)
2041 mvneta_txq_pend_desc_add(pp, txq, 0);
2042 __netif_tx_unlock(nq);
2048 mvneta_xdp_xmit(struct net_device *dev, int num_frame,
2049 struct xdp_frame **frames, u32 flags)
2051 struct mvneta_port *pp = netdev_priv(dev);
2052 int cpu = smp_processor_id();
2053 struct mvneta_tx_queue *txq;
2054 struct netdev_queue *nq;
2058 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2061 txq = &pp->txqs[cpu % txq_number];
2062 nq = netdev_get_tx_queue(pp->dev, txq->id);
2064 __netif_tx_lock(nq, cpu);
2065 for (i = 0; i < num_frame; i++) {
2066 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
2067 if (ret != MVNETA_XDP_TX) {
2068 xdp_return_frame_rx_napi(frames[i]);
2073 if (unlikely(flags & XDP_XMIT_FLUSH))
2074 mvneta_txq_pend_desc_add(pp, txq, 0);
2075 __netif_tx_unlock(nq);
2077 return num_frame - drops;
2081 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2082 struct bpf_prog *prog, struct xdp_buff *xdp)
2084 u32 ret, act = bpf_prog_run_xdp(prog, xdp);
2088 ret = MVNETA_XDP_PASS;
2090 case XDP_REDIRECT: {
2093 err = xdp_do_redirect(pp->dev, xdp, prog);
2095 ret = MVNETA_XDP_DROPPED;
2096 __page_pool_put_page(rxq->page_pool,
2097 virt_to_head_page(xdp->data),
2098 xdp->data_end - xdp->data_hard_start,
2101 ret = MVNETA_XDP_REDIR;
2106 ret = mvneta_xdp_xmit_back(pp, xdp);
2107 if (ret != MVNETA_XDP_TX)
2108 __page_pool_put_page(rxq->page_pool,
2109 virt_to_head_page(xdp->data),
2110 xdp->data_end - xdp->data_hard_start,
2114 bpf_warn_invalid_xdp_action(act);
2117 trace_xdp_exception(pp->dev, prog, act);
2120 __page_pool_put_page(rxq->page_pool,
2121 virt_to_head_page(xdp->data),
2122 xdp->data_end - xdp->data_hard_start,
2124 ret = MVNETA_XDP_DROPPED;
2132 mvneta_swbm_rx_frame(struct mvneta_port *pp,
2133 struct mvneta_rx_desc *rx_desc,
2134 struct mvneta_rx_queue *rxq,
2135 struct xdp_buff *xdp,
2136 struct bpf_prog *xdp_prog,
2137 struct page *page, u32 *xdp_ret)
2139 unsigned char *data = page_address(page);
2140 int data_len = -MVNETA_MH_SIZE, len;
2141 struct net_device *dev = pp->dev;
2142 enum dma_data_direction dma_dir;
2144 if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
2145 len = MVNETA_MAX_RX_BUF_SIZE;
2148 len = rx_desc->data_size;
2149 data_len += len - ETH_FCS_LEN;
2152 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2153 dma_sync_single_for_cpu(dev->dev.parent,
2154 rx_desc->buf_phys_addr,
2157 /* Prefetch header */
2160 xdp->data_hard_start = data;
2161 xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
2162 xdp->data_end = xdp->data + data_len;
2163 xdp_set_data_meta_invalid(xdp);
2168 ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
2169 if (ret != MVNETA_XDP_PASS) {
2170 mvneta_update_stats(pp, 1,
2171 xdp->data_end - xdp->data,
2173 rx_desc->buf_phys_addr = 0;
2179 rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
2180 if (unlikely(!rxq->skb)) {
2182 "Can't allocate skb on queue %d\n",
2184 dev->stats.rx_dropped++;
2185 rxq->skb_alloc_err++;
2188 page_pool_release_page(rxq->page_pool, page);
2190 skb_reserve(rxq->skb,
2191 xdp->data - xdp->data_hard_start);
2192 skb_put(rxq->skb, xdp->data_end - xdp->data);
2193 mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
2195 rxq->left_size = rx_desc->data_size - len;
2196 rx_desc->buf_phys_addr = 0;
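/* Illustrative sketch, not part of the driver: the single-buffer layout
 * built just above.  Only the relationships visible in
 * mvneta_swbm_rx_frame() are assumed; offsets are symbolic.
 *
 *   page_address(page)
 *   |<- rx_offset_correction + MVNETA_MH_SIZE ->|<----- data_len ----->|
 *   ^                                           ^                      ^
 *   xdp->data_hard_start                        xdp->data              xdp->data_end
 *
 * The skb built from this buffer reserves (xdp->data - xdp->data_hard_start)
 * bytes of headroom and then skb_put()s (xdp->data_end - xdp->data) bytes,
 * so the XDP program and the skb path see the same payload window.
 */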
2202 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2203 struct mvneta_rx_desc *rx_desc,
2204 struct mvneta_rx_queue *rxq,
2207 struct net_device *dev = pp->dev;
2208 enum dma_data_direction dma_dir;
2211 if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
2212 len = MVNETA_MAX_RX_BUF_SIZE;
2215 len = rxq->left_size;
2216 data_len = len - ETH_FCS_LEN;
2218 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2219 dma_sync_single_for_cpu(dev->dev.parent,
2220 rx_desc->buf_phys_addr,
2223 /* refill descriptor with new buffer later */
2224 skb_add_rx_frag(rxq->skb,
2225 skb_shinfo(rxq->skb)->nr_frags,
2226 page, pp->rx_offset_correction, data_len,
2229 page_pool_release_page(rxq->page_pool, page);
2230 rx_desc->buf_phys_addr = 0;
2231 rxq->left_size -= len;
2234 /* Main rx processing when using software buffer management */
2235 static int mvneta_rx_swbm(struct napi_struct *napi,
2236 struct mvneta_port *pp, int budget,
2237 struct mvneta_rx_queue *rxq)
2239 int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
2240 struct net_device *dev = pp->dev;
2241 struct bpf_prog *xdp_prog;
2242 struct xdp_buff xdp_buf;
2243 int rx_todo, refill;
2246 /* Get number of received packets */
2247 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2250 xdp_prog = READ_ONCE(pp->xdp_prog);
2251 xdp_buf.rxq = &rxq->xdp_rxq;
2253 /* Fairness NAPI loop */
2254 while (rx_proc < budget && rx_proc < rx_todo) {
2255 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2256 u32 rx_status, index;
2259 index = rx_desc - rxq->descs;
2260 page = (struct page *)rxq->buf_virt_addr[index];
2262 rx_status = rx_desc->status;
2266 if (rx_status & MVNETA_RXD_FIRST_DESC) {
2269 /* Check errors only for FIRST descriptor */
2270 if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
2271 mvneta_rx_error(pp, rx_desc);
2272 dev->stats.rx_errors++;
2273 /* leave the descriptor untouched */
2277 err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2278 xdp_prog, page, &xdp_ret);
2282 if (unlikely(!rxq->skb)) {
2283 pr_debug("no skb for rx_status 0x%x\n",
2287 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
2288 } /* Middle or Last descriptor */
2290 if (!(rx_status & MVNETA_RXD_LAST_DESC))
2291 /* no last descriptor this time */
2294 if (rxq->left_size) {
2295 pr_err("get last desc, but left_size (%d) != 0\n",
2297 dev_kfree_skb_any(rxq->skb);
2303 rcvd_bytes += rxq->skb->len;
2305 /* Linux processing */
2306 rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
2308 napi_gro_receive(napi, rxq->skb);
2310 /* clean incomplete skb pointer in queue */
2315 if (xdp_ret & MVNETA_XDP_REDIR)
2319 mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
2321 /* return some buffers to hardware queue, one at a time is too slow */
2322 refill = mvneta_rx_refill_queue(pp, rxq);
2324 /* Update rxq management counters */
2325 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2330 /* Main rx processing when using hardware buffer management */
2331 static int mvneta_rx_hwbm(struct napi_struct *napi,
2332 struct mvneta_port *pp, int rx_todo,
2333 struct mvneta_rx_queue *rxq)
2335 struct net_device *dev = pp->dev;
2340 /* Get number of received packets */
2341 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2343 if (rx_todo > rx_done)
2348 /* Fairness NAPI loop */
2349 while (rx_done < rx_todo) {
2350 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2351 struct mvneta_bm_pool *bm_pool = NULL;
2352 struct sk_buff *skb;
2353 unsigned char *data;
2354 dma_addr_t phys_addr;
2355 u32 rx_status, frag_size;
2360 rx_status = rx_desc->status;
2361 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2362 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2363 phys_addr = rx_desc->buf_phys_addr;
2364 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2365 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2367 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2368 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2369 err_drop_frame_ret_pool:
2370 /* Return the buffer to the pool */
2371 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2372 rx_desc->buf_phys_addr);
2374 dev->stats.rx_errors++;
2375 mvneta_rx_error(pp, rx_desc);
2376 /* leave the descriptor untouched */
2380 if (rx_bytes <= rx_copybreak) {
2381 /* better copy a small frame and not unmap the DMA region */
2382 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2384 goto err_drop_frame_ret_pool;
2386 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2387 rx_desc->buf_phys_addr,
2388 MVNETA_MH_SIZE + NET_SKB_PAD,
2391 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2394 skb->protocol = eth_type_trans(skb, dev);
2395 mvneta_rx_csum(pp, rx_status, skb);
2396 napi_gro_receive(napi, skb);
2399 rcvd_bytes += rx_bytes;
2401 /* Return the buffer to the pool */
2402 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2403 rx_desc->buf_phys_addr);
2405 /* leave the descriptor and buffer untouched */
2409 /* Refill processing */
2410 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2412 netdev_err(dev, "Linux processing - Can't refill\n");
2414 goto err_drop_frame_ret_pool;
2417 frag_size = bm_pool->hwbm_pool.frag_size;
2419 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2421 /* After refill, the old buffer has to be unmapped regardless
2422 * of whether the skb was successfully built or not.
2424 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2425 bm_pool->buf_size, DMA_FROM_DEVICE);
2427 goto err_drop_frame;
2430 rcvd_bytes += rx_bytes;
2432 /* Linux processing */
2433 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2434 skb_put(skb, rx_bytes);
2436 skb->protocol = eth_type_trans(skb, dev);
2438 mvneta_rx_csum(pp, rx_status, skb);
2440 napi_gro_receive(napi, skb);
2444 mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
2446 /* Update rxq management counters */
2447 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2453 mvneta_tso_put_hdr(struct sk_buff *skb,
2454 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2456 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2457 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2458 struct mvneta_tx_desc *tx_desc;
2460 tx_desc = mvneta_txq_next_desc_get(txq);
2461 tx_desc->data_size = hdr_len;
2462 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2463 tx_desc->command |= MVNETA_TXD_F_DESC;
2464 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2465 txq->txq_put_index * TSO_HEADER_SIZE;
2466 buf->type = MVNETA_TYPE_SKB;
2469 mvneta_txq_inc_put(txq);
2473 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2474 struct sk_buff *skb, char *data, int size,
2475 bool last_tcp, bool is_last)
2477 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2478 struct mvneta_tx_desc *tx_desc;
2480 tx_desc = mvneta_txq_next_desc_get(txq);
2481 tx_desc->data_size = size;
2482 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2483 size, DMA_TO_DEVICE);
2484 if (unlikely(dma_mapping_error(dev->dev.parent,
2485 tx_desc->buf_phys_addr))) {
2486 mvneta_txq_desc_put(txq);
2490 tx_desc->command = 0;
2491 buf->type = MVNETA_TYPE_SKB;
2495 /* last descriptor in the TCP packet */
2496 tx_desc->command = MVNETA_TXD_L_DESC;
2498 /* last descriptor in SKB */
2502 mvneta_txq_inc_put(txq);
2506 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2507 struct mvneta_tx_queue *txq)
2509 int total_len, data_left;
2511 struct mvneta_port *pp = netdev_priv(dev);
2513 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2516 /* Count needed descriptors */
2517 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2520 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2521 pr_info("*** Is this even possible???!?!?\n");
2525 /* Initialize the TSO handler, and prepare the first payload */
2526 tso_start(skb, &tso);
2528 total_len = skb->len - hdr_len;
2529 while (total_len > 0) {
2532 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2533 total_len -= data_left;
2536 /* prepare packet headers: MAC + IP + TCP */
2537 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2538 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2540 mvneta_tso_put_hdr(skb, pp, txq);
2542 while (data_left > 0) {
2546 size = min_t(int, tso.size, data_left);
2548 if (mvneta_tso_put_data(dev, txq, skb,
2555 tso_build_data(skb, &tso, size);
2562 /* Release all used data descriptors; header descriptors must not be unmapped. */
2565 for (i = desc_count - 1; i >= 0; i--) {
2566 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2567 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2568 dma_unmap_single(pp->dev->dev.parent,
2569 tx_desc->buf_phys_addr,
2572 mvneta_txq_desc_put(txq);
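/* Illustrative sketch, not part of the driver: the per-segment descriptor
 * pattern the TSO path above produces, based only on the flag usage visible
 * in mvneta_tso_put_hdr() and mvneta_tso_put_data().
 *
 *   for each TSO segment:
 *     desc 0:       rebuilt MAC/IP/TCP header -> command |= MVNETA_TXD_F_DESC
 *     desc 1..n-1:  payload chunks            -> command  = 0
 *     desc n:       last payload of segment   -> command  = MVNETA_TXD_L_DESC
 *
 * Header bytes come from the pre-allocated txq->tso_hdrs DMA area rather
 * than a per-packet mapping, which is why the error-cleanup loop above
 * skips dma_unmap_single() for IS_TSO_HEADER() addresses.
 */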
2577 /* Handle tx fragmentation processing */
2578 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2579 struct mvneta_tx_queue *txq)
2581 struct mvneta_tx_desc *tx_desc;
2582 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2584 for (i = 0; i < nr_frags; i++) {
2585 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2586 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2587 void *addr = skb_frag_address(frag);
2589 tx_desc = mvneta_txq_next_desc_get(txq);
2590 tx_desc->data_size = skb_frag_size(frag);
2592 tx_desc->buf_phys_addr =
2593 dma_map_single(pp->dev->dev.parent, addr,
2594 tx_desc->data_size, DMA_TO_DEVICE);
2596 if (dma_mapping_error(pp->dev->dev.parent,
2597 tx_desc->buf_phys_addr)) {
2598 mvneta_txq_desc_put(txq);
2602 if (i == nr_frags - 1) {
2603 /* Last descriptor */
2604 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2607 /* Descriptor in the middle: Not First, Not Last */
2608 tx_desc->command = 0;
2611 buf->type = MVNETA_TYPE_SKB;
2612 mvneta_txq_inc_put(txq);
2618 /* Release all descriptors that were used to map fragments of
2619 * this packet, as well as the corresponding DMA mappings
2621 for (i = i - 1; i >= 0; i--) {
2622 tx_desc = txq->descs + i;
2623 dma_unmap_single(pp->dev->dev.parent,
2624 tx_desc->buf_phys_addr,
2627 mvneta_txq_desc_put(txq);
2633 /* Main tx processing */
2634 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2636 struct mvneta_port *pp = netdev_priv(dev);
2637 u16 txq_id = skb_get_queue_mapping(skb);
2638 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2639 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2640 struct mvneta_tx_desc *tx_desc;
2645 if (!netif_running(dev))
2648 if (skb_is_gso(skb)) {
2649 frags = mvneta_tx_tso(skb, dev, txq);
2653 frags = skb_shinfo(skb)->nr_frags + 1;
2655 /* Get a descriptor for the first part of the packet */
2656 tx_desc = mvneta_txq_next_desc_get(txq);
2658 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2660 tx_desc->data_size = skb_headlen(skb);
2662 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2665 if (unlikely(dma_mapping_error(dev->dev.parent,
2666 tx_desc->buf_phys_addr))) {
2667 mvneta_txq_desc_put(txq);
2672 buf->type = MVNETA_TYPE_SKB;
2674 /* First and Last descriptor */
2675 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2676 tx_desc->command = tx_cmd;
2678 mvneta_txq_inc_put(txq);
2680 /* First but not Last */
2681 tx_cmd |= MVNETA_TXD_F_DESC;
2683 mvneta_txq_inc_put(txq);
2684 tx_desc->command = tx_cmd;
2685 /* Continue with other skb fragments */
2686 if (mvneta_tx_frag_process(pp, skb, txq)) {
2687 dma_unmap_single(dev->dev.parent,
2688 tx_desc->buf_phys_addr,
2691 mvneta_txq_desc_put(txq);
2699 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2701 netdev_tx_sent_queue(nq, len);
2703 txq->count += frags;
2704 if (txq->count >= txq->tx_stop_threshold)
2705 netif_tx_stop_queue(nq);
2707 if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2708 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2709 mvneta_txq_pend_desc_add(pp, txq, frags);
2711 txq->pending += frags;
2713 mvneta_update_stats(pp, 1, len, true);
2715 dev->stats.tx_dropped++;
2716 dev_kfree_skb_any(skb);
2719 return NETDEV_TX_OK;
2723 /* Free tx resources, when resetting a port */
2724 static void mvneta_txq_done_force(struct mvneta_port *pp,
2725 struct mvneta_tx_queue *txq)
2728 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2729 int tx_done = txq->count;
2731 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2735 txq->txq_put_index = 0;
2736 txq->txq_get_index = 0;
2739 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2740 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2742 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2744 struct mvneta_tx_queue *txq;
2745 struct netdev_queue *nq;
2746 int cpu = smp_processor_id();
2748 while (cause_tx_done) {
2749 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2751 nq = netdev_get_tx_queue(pp->dev, txq->id);
2752 __netif_tx_lock(nq, cpu);
2755 mvneta_txq_done(pp, txq);
2757 __netif_tx_unlock(nq);
2758 cause_tx_done &= ~((1 << txq->id));
2762 /* Compute the CRC-8 of the specified address, using an algorithm unique
2763 * to this hardware (per the HW spec), different from the generic CRC-8.
2765 static int mvneta_addr_crc(unsigned char *addr)
2770 for (i = 0; i < ETH_ALEN; i++) {
2773 crc = (crc ^ addr[i]) << 8;
2774 for (j = 7; j >= 0; j--) {
2775 if (crc & (0x100 << j))
2783 /* This method controls the net device's special MAC multicast support.
2784 * The Special Multicast Table for MAC addresses supports MACs of the form
2785 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2786 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2787 * Table entries in the DA-Filter table. This method sets the appropriate
2788 * Special Multicast Table entry.
2790 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2791 unsigned char last_byte,
2794 unsigned int smc_table_reg;
2795 unsigned int tbl_offset;
2796 unsigned int reg_offset;
2798 /* Register offset from SMC table base */
2799 tbl_offset = (last_byte / 4);
2800 /* Entry offset within the above reg */
2801 reg_offset = last_byte % 4;
2803 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2807 smc_table_reg &= ~(0xff << (8 * reg_offset));
2809 smc_table_reg &= ~(0xff << (8 * reg_offset));
2810 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2813 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
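/* Illustrative sketch, not part of the driver: the DA-Filter indexing used
 * above for a special multicast address 01:00:5E:00:00:XX.  Plain C with
 * the same arithmetic; the MVNETA register names are left out on purpose.
 */
static void mvneta_smc_entry_sketch(unsigned char last_byte, int queue,
				    unsigned int *reg_byte_offset,
				    unsigned int *reg_value)
{
	unsigned int tbl_offset = last_byte / 4;	/* which 32-bit SMC register */
	unsigned int reg_offset = last_byte % 4;	/* byte lane inside that register */

	/* Byte offset of the register from the SMC table base */
	*reg_byte_offset = tbl_offset * 4;
	/* Entry byte: bit 0 accepts the frame, the queue number sits above it,
	 * shifted into the right byte lane of the register.
	 */
	*reg_value = (0x01 | (queue << 1)) << (8 * reg_offset);
	/* e.g. last_byte 0x2A, queue 1 -> register at base + 0x28, value 0x00030000 */
}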
2817 /* This method controls the network device Other MAC multicast support.
2818 * The Other Multicast Table is used for multicast of another type.
2819 * A CRC-8 is used as an index to the Other Multicast Table entries
2820 * in the DA-Filter table.
2821 * The method gets the CRC-8 value from the calling routine and sets
2822 * the appropriate Other Multicast Table entry according to that value. */
2825 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2829 unsigned int omc_table_reg;
2830 unsigned int tbl_offset;
2831 unsigned int reg_offset;
2833 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2834 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2836 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2839 /* Clear the 'accept frame' bit at the specified Other DA table entry */
2840 omc_table_reg &= ~(0xff << (8 * reg_offset));
2842 omc_table_reg &= ~(0xff << (8 * reg_offset));
2843 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2846 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2849 /* The network device supports multicast using two tables:
2850 * 1) Special Multicast Table for MAC addresses of the form
2851 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2852 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2853 * Table entries in the DA-Filter table.
2854 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2855 * is used as an index to the Other Multicast Table entries in the
2858 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2861 unsigned char crc_result = 0;
2863 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2864 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2868 crc_result = mvneta_addr_crc(p_addr);
2870 if (pp->mcast_count[crc_result] == 0) {
2871 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2876 pp->mcast_count[crc_result]--;
2877 if (pp->mcast_count[crc_result] != 0) {
2878 netdev_info(pp->dev,
2879 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2880 pp->mcast_count[crc_result], crc_result);
2884 pp->mcast_count[crc_result]++;
2886 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2891 /* Configure the filtering mode of the Ethernet port */
2892 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2895 u32 port_cfg_reg, val;
2897 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2899 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2901 /* Set / Clear UPM bit in port configuration register */
2903 /* Accept all Unicast addresses */
2904 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2905 val |= MVNETA_FORCE_UNI;
2906 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2907 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2909 /* Reject all Unicast addresses */
2910 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2911 val &= ~MVNETA_FORCE_UNI;
2914 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2915 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2918 /* register unicast and multicast addresses */
2919 static void mvneta_set_rx_mode(struct net_device *dev)
2921 struct mvneta_port *pp = netdev_priv(dev);
2922 struct netdev_hw_addr *ha;
2924 if (dev->flags & IFF_PROMISC) {
2925 /* Accept all: Multicast + Unicast */
2926 mvneta_rx_unicast_promisc_set(pp, 1);
2927 mvneta_set_ucast_table(pp, pp->rxq_def);
2928 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2929 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2931 /* Accept single Unicast */
2932 mvneta_rx_unicast_promisc_set(pp, 0);
2933 mvneta_set_ucast_table(pp, -1);
2934 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2936 if (dev->flags & IFF_ALLMULTI) {
2937 /* Accept all multicast */
2938 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2939 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2941 /* Accept only initialized multicast */
2942 mvneta_set_special_mcast_table(pp, -1);
2943 mvneta_set_other_mcast_table(pp, -1);
2945 if (!netdev_mc_empty(dev)) {
2946 netdev_for_each_mc_addr(ha, dev) {
2947 mvneta_mcast_addr_set(pp, ha->addr,
2955 /* Interrupt handling - the callback for request_irq() */
2956 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2958 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2960 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2961 napi_schedule(&pp->napi);
2966 /* Interrupt handling - the callback for request_percpu_irq() */
2967 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2969 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2971 disable_percpu_irq(port->pp->dev->irq);
2972 napi_schedule(&port->napi);
2977 static void mvneta_link_change(struct mvneta_port *pp)
2979 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2981 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
2985 * Bits 0-7 of the causeRxTx register indicate that packets were
2986 * transmitted on the corresponding TXQ (bit 0 is for TX queue 1).
2987 * Bits 8-15 of the causeRxTx register indicate that packets were
2988 * received on the corresponding RXQ (bit 8 is for RX queue 0).
2989 * Each CPU has its own causeRxTx register.
2991 static int mvneta_poll(struct napi_struct *napi, int budget)
2996 struct mvneta_port *pp = netdev_priv(napi->dev);
2997 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2999 if (!netif_running(pp->dev)) {
3000 napi_complete(napi);
3004 /* Read cause register */
3005 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3006 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
3007 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3009 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3011 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3012 MVNETA_CAUSE_LINK_CHANGE))
3013 mvneta_link_change(pp);
3016 /* Release Tx descriptors */
3017 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3018 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3019 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
3022 /* For the case where the last mvneta_poll did not process all RX packets */
3025 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3027 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3031 rx_queue = rx_queue - 1;
3033 rx_done = mvneta_rx_hwbm(napi, pp, budget,
3034 &pp->rxqs[rx_queue]);
3036 rx_done = mvneta_rx_swbm(napi, pp, budget,
3037 &pp->rxqs[rx_queue]);
3040 if (rx_done < budget) {
3042 napi_complete_done(napi, rx_done);
3044 if (pp->neta_armada3700) {
3045 unsigned long flags;
3047 local_irq_save(flags);
3048 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3049 MVNETA_RX_INTR_MASK(rxq_number) |
3050 MVNETA_TX_INTR_MASK(txq_number) |
3051 MVNETA_MISCINTR_INTR_MASK);
3052 local_irq_restore(flags);
3054 enable_percpu_irq(pp->dev->irq, 0);
3058 if (pp->neta_armada3700)
3059 pp->cause_rx_tx = cause_rx_tx;
3061 port->cause_rx_tx = cause_rx_tx;
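/* Illustrative sketch, not part of the driver: how the RX queue is chosen
 * from the cause register above.  Bits 8..15 flag RXQs 0..7, and fls()
 * returns the highest set bit (1-based), so the highest-numbered pending
 * queue is serviced on this poll.
 */
static int mvneta_cause_to_rxq_sketch(u32 cause_rx_tx)
{
	u32 rx_bits = (cause_rx_tx >> 8) & 0xff;

	if (!rx_bits)
		return -1;		/* no RX work pending */

	/* e.g. cause 0x2400 -> rx_bits 0x24 (RXQs 2 and 5) -> returns 5 */
	return fls(rx_bits) - 1;
}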
3066 static int mvneta_create_page_pool(struct mvneta_port *pp,
3067 struct mvneta_rx_queue *rxq, int size)
3069 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3070 struct page_pool_params pp_params = {
3072 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
3074 .nid = cpu_to_node(0),
3075 .dev = pp->dev->dev.parent,
3076 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
3077 .offset = pp->rx_offset_correction,
3078 .max_len = MVNETA_MAX_RX_BUF_SIZE,
3082 rxq->page_pool = page_pool_create(&pp_params);
3083 if (IS_ERR(rxq->page_pool)) {
3084 err = PTR_ERR(rxq->page_pool);
3085 rxq->page_pool = NULL;
3089 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
3093 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3096 goto err_unregister_rxq;
3101 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3103 page_pool_destroy(rxq->page_pool);
3104 rxq->page_pool = NULL;
3108 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
3109 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3114 err = mvneta_create_page_pool(pp, rxq, num);
3118 for (i = 0; i < num; i++) {
3119 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3120 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3123 "%s:rxq %d, %d of %d buffs filled\n",
3124 __func__, rxq->id, i, num);
3129 /* Add this number of RX descriptors as non-occupied (ready to get packets) */
3132 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3137 /* Free all packets pending transmit from all TXQs and reset TX port */
3138 static void mvneta_tx_reset(struct mvneta_port *pp)
3142 /* free the skbs in the tx ring */
3143 for (queue = 0; queue < txq_number; queue++)
3144 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3146 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3147 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3150 static void mvneta_rx_reset(struct mvneta_port *pp)
3152 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3153 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3156 /* Rx/Tx queue initialization/cleanup methods */
3158 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3159 struct mvneta_rx_queue *rxq)
3161 rxq->size = pp->rx_ring_size;
3163 /* Allocate memory for RX descriptors */
3164 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3165 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3166 &rxq->descs_phys, GFP_KERNEL);
3170 rxq->last_desc = rxq->size - 1;
3175 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3176 struct mvneta_rx_queue *rxq)
3178 /* Set Rx descriptors queue starting address */
3179 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3180 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3182 /* Set coalescing pkts and time */
3183 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3184 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3188 mvneta_rxq_offset_set(pp, rxq, 0);
3189 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3190 MVNETA_MAX_RX_BUF_SIZE :
3191 MVNETA_RX_BUF_SIZE(pp->pkt_size));
3192 mvneta_rxq_bm_disable(pp, rxq);
3193 mvneta_rxq_fill(pp, rxq, rxq->size);
3196 mvneta_rxq_offset_set(pp, rxq,
3197 NET_SKB_PAD - pp->rx_offset_correction);
3199 mvneta_rxq_bm_enable(pp, rxq);
3200 /* Fill RXQ with buffers from RX pool */
3201 mvneta_rxq_long_pool_set(pp, rxq);
3202 mvneta_rxq_short_pool_set(pp, rxq);
3203 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3207 /* Create a specified RX queue */
3208 static int mvneta_rxq_init(struct mvneta_port *pp,
3209 struct mvneta_rx_queue *rxq)
3214 ret = mvneta_rxq_sw_init(pp, rxq);
3218 mvneta_rxq_hw_init(pp, rxq);
3223 /* Cleanup Rx queue */
3224 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3225 struct mvneta_rx_queue *rxq)
3227 mvneta_rxq_drop_pkts(pp, rxq);
3230 dev_kfree_skb_any(rxq->skb);
3233 dma_free_coherent(pp->dev->dev.parent,
3234 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3240 rxq->next_desc_to_proc = 0;
3241 rxq->descs_phys = 0;
3242 rxq->first_to_refill = 0;
3243 rxq->refill_num = 0;
3248 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3249 struct mvneta_tx_queue *txq)
3253 txq->size = pp->tx_ring_size;
3255 /* A queue must always have room for at least one skb.
3256 * Therefore, stop the queue when the number of free entries reaches
3257 * the maximum number of descriptors per skb.
3259 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3260 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
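	/* Illustrative example with hypothetical numbers (not the driver's real
	 * constants): if txq->size were 512 and the worst case were 32 descriptors
	 * per skb, tx_stop_threshold would be 512 - 32 = 480 (stop the queue once
	 * 480 entries are in use) and tx_wake_threshold 480 / 2 = 240 (wake it
	 * again once usage drops to 240 or fewer entries).
	 */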
3262 /* Allocate memory for TX descriptors */
3263 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3264 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3265 &txq->descs_phys, GFP_KERNEL);
3269 txq->last_desc = txq->size - 1;
3271 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3273 dma_free_coherent(pp->dev->dev.parent,
3274 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3275 txq->descs, txq->descs_phys);
3279 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3280 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3281 txq->size * TSO_HEADER_SIZE,
3282 &txq->tso_hdrs_phys, GFP_KERNEL);
3283 if (!txq->tso_hdrs) {
3285 dma_free_coherent(pp->dev->dev.parent,
3286 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3287 txq->descs, txq->descs_phys);
3291 /* Setup XPS mapping */
3293 cpu = txq->id % num_present_cpus();
3295 cpu = pp->rxq_def % num_present_cpus();
3296 cpumask_set_cpu(cpu, &txq->affinity_mask);
3297 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3302 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3303 struct mvneta_tx_queue *txq)
3305 /* Set maximum bandwidth for enabled TXQs */
3306 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3307 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3309 /* Set Tx descriptors queue starting address */
3310 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3311 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3313 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3316 /* Create and initialize a tx queue */
3317 static int mvneta_txq_init(struct mvneta_port *pp,
3318 struct mvneta_tx_queue *txq)
3322 ret = mvneta_txq_sw_init(pp, txq);
3326 mvneta_txq_hw_init(pp, txq);
3331 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
3332 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3333 struct mvneta_tx_queue *txq)
3335 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3340 dma_free_coherent(pp->dev->dev.parent,
3341 txq->size * TSO_HEADER_SIZE,
3342 txq->tso_hdrs, txq->tso_hdrs_phys);
3344 dma_free_coherent(pp->dev->dev.parent,
3345 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3346 txq->descs, txq->descs_phys);
3348 netdev_tx_reset_queue(nq);
3352 txq->next_desc_to_proc = 0;
3353 txq->descs_phys = 0;
3356 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3357 struct mvneta_tx_queue *txq)
3359 /* Set minimum bandwidth for disabled TXQs */
3360 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3361 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3363 /* Set Tx descriptors queue starting address and size */
3364 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3365 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3368 static void mvneta_txq_deinit(struct mvneta_port *pp,
3369 struct mvneta_tx_queue *txq)
3371 mvneta_txq_sw_deinit(pp, txq);
3372 mvneta_txq_hw_deinit(pp, txq);
3375 /* Cleanup all Tx queues */
3376 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3380 for (queue = 0; queue < txq_number; queue++)
3381 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3384 /* Cleanup all Rx queues */
3385 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3389 for (queue = 0; queue < rxq_number; queue++)
3390 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3394 /* Init all Rx queues */
3395 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3399 for (queue = 0; queue < rxq_number; queue++) {
3400 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3403 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3405 mvneta_cleanup_rxqs(pp);
3413 /* Init all tx queues */
3414 static int mvneta_setup_txqs(struct mvneta_port *pp)
3418 for (queue = 0; queue < txq_number; queue++) {
3419 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3421 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3423 mvneta_cleanup_txqs(pp);
3431 static int mvneta_comphy_init(struct mvneta_port *pp)
3438 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
3443 return phy_power_on(pp->comphy);
3446 static void mvneta_start_dev(struct mvneta_port *pp)
3450 WARN_ON(mvneta_comphy_init(pp));
3452 mvneta_max_rx_size_set(pp, pp->pkt_size);
3453 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3455 /* start the Rx/Tx activity */
3456 mvneta_port_enable(pp);
3458 if (!pp->neta_armada3700) {
3459 /* Enable polling on the port */
3460 for_each_online_cpu(cpu) {
3461 struct mvneta_pcpu_port *port =
3462 per_cpu_ptr(pp->ports, cpu);
3464 napi_enable(&port->napi);
3467 napi_enable(&pp->napi);
3470 /* Unmask interrupts. It has to be done from each CPU */
3471 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3473 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3474 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3475 MVNETA_CAUSE_LINK_CHANGE);
3477 phylink_start(pp->phylink);
3478 netif_tx_start_all_queues(pp->dev);
3481 static void mvneta_stop_dev(struct mvneta_port *pp)
3485 phylink_stop(pp->phylink);
3487 if (!pp->neta_armada3700) {
3488 for_each_online_cpu(cpu) {
3489 struct mvneta_pcpu_port *port =
3490 per_cpu_ptr(pp->ports, cpu);
3492 napi_disable(&port->napi);
3495 napi_disable(&pp->napi);
3498 netif_carrier_off(pp->dev);
3500 mvneta_port_down(pp);
3501 netif_tx_stop_all_queues(pp->dev);
3503 /* Stop the port activity */
3504 mvneta_port_disable(pp);
3506 /* Clear all ethernet port interrupts */
3507 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3509 /* Mask all ethernet port interrupts */
3510 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3512 mvneta_tx_reset(pp);
3513 mvneta_rx_reset(pp);
3515 WARN_ON(phy_power_off(pp->comphy));
3518 static void mvneta_percpu_enable(void *arg)
3520 struct mvneta_port *pp = arg;
3522 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3525 static void mvneta_percpu_disable(void *arg)
3527 struct mvneta_port *pp = arg;
3529 disable_percpu_irq(pp->dev->irq);
3532 /* Change the device mtu */
3533 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3535 struct mvneta_port *pp = netdev_priv(dev);
3538 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3539 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3540 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3541 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3544 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
3545 netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
3551 if (!netif_running(dev)) {
3553 mvneta_bm_update_mtu(pp, mtu);
3555 netdev_update_features(dev);
3559 /* The interface is running, so we have to force a
3560 * reallocation of the queues
3562 mvneta_stop_dev(pp);
3563 on_each_cpu(mvneta_percpu_disable, pp, true);
3565 mvneta_cleanup_txqs(pp);
3566 mvneta_cleanup_rxqs(pp);
3569 mvneta_bm_update_mtu(pp, mtu);
3571 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3573 ret = mvneta_setup_rxqs(pp);
3575 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3579 ret = mvneta_setup_txqs(pp);
3581 netdev_err(dev, "unable to setup txqs after MTU change\n");
3585 on_each_cpu(mvneta_percpu_enable, pp, true);
3586 mvneta_start_dev(pp);
3588 netdev_update_features(dev);
3593 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3594 netdev_features_t features)
3596 struct mvneta_port *pp = netdev_priv(dev);
3598 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3599 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3601 "Disable IP checksum for MTU greater than %dB\n",
3608 /* Get mac address */
3609 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3611 u32 mac_addr_l, mac_addr_h;
3613 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3614 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3615 addr[0] = (mac_addr_h >> 24) & 0xFF;
3616 addr[1] = (mac_addr_h >> 16) & 0xFF;
3617 addr[2] = (mac_addr_h >> 8) & 0xFF;
3618 addr[3] = mac_addr_h & 0xFF;
3619 addr[4] = (mac_addr_l >> 8) & 0xFF;
3620 addr[5] = mac_addr_l & 0xFF;
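/* Illustrative sketch, not part of the driver: the inverse mapping of the
 * unpacking above, i.e. how six address bytes fit into the two registers.
 * Derived only from the byte positions read back in mvneta_get_mac_addr().
 */
static void mvneta_mac_regs_sketch(const unsigned char *addr,
				   u32 *mac_addr_h, u32 *mac_addr_l)
{
	*mac_addr_h = (addr[0] << 24) | (addr[1] << 16) |
		      (addr[2] << 8) | addr[3];
	*mac_addr_l = (addr[4] << 8) | addr[5];
}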
3623 /* Handle setting mac address */
3624 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3626 struct mvneta_port *pp = netdev_priv(dev);
3627 struct sockaddr *sockaddr = addr;
3630 ret = eth_prepare_mac_addr_change(dev, addr);
3633 /* Remove previous address table entry */
3634 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3636 /* Set new addr in hw */
3637 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3639 eth_commit_mac_addr_change(dev, addr);
3643 static void mvneta_validate(struct phylink_config *config,
3644 unsigned long *supported,
3645 struct phylink_link_state *state)
3647 struct net_device *ndev = to_net_dev(config->dev);
3648 struct mvneta_port *pp = netdev_priv(ndev);
3649 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3651 /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
3652 if (state->interface != PHY_INTERFACE_MODE_NA &&
3653 state->interface != PHY_INTERFACE_MODE_QSGMII &&
3654 state->interface != PHY_INTERFACE_MODE_SGMII &&
3655 !phy_interface_mode_is_8023z(state->interface) &&
3656 !phy_interface_mode_is_rgmii(state->interface)) {
3657 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3661 /* Allow all the expected bits */
3662 phylink_set(mask, Autoneg);
3663 phylink_set_port_modes(mask);
3665 /* Asymmetric pause is unsupported */
3666 phylink_set(mask, Pause);
3668 /* Half-duplex at speeds higher than 100Mbit is unsupported */
3669 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
3670 phylink_set(mask, 1000baseT_Full);
3671 phylink_set(mask, 1000baseX_Full);
3673 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3674 phylink_set(mask, 2500baseT_Full);
3675 phylink_set(mask, 2500baseX_Full);
3678 if (!phy_interface_mode_is_8023z(state->interface)) {
3679 /* 10M and 100M are only supported in non-802.3z mode */
3680 phylink_set(mask, 10baseT_Half);
3681 phylink_set(mask, 10baseT_Full);
3682 phylink_set(mask, 100baseT_Half);
3683 phylink_set(mask, 100baseT_Full);
3686 bitmap_and(supported, supported, mask,
3687 __ETHTOOL_LINK_MODE_MASK_NBITS);
3688 bitmap_and(state->advertising, state->advertising, mask,
3689 __ETHTOOL_LINK_MODE_MASK_NBITS);
3691 /* We can only operate at 2500BaseX or 1000BaseX. If requested
3692 * to advertise both, only report advertising at 2500BaseX.
3694 phylink_helper_basex_speed(state);
3697 static void mvneta_mac_pcs_get_state(struct phylink_config *config,
3698 struct phylink_link_state *state)
3700 struct net_device *ndev = to_net_dev(config->dev);
3701 struct mvneta_port *pp = netdev_priv(ndev);
3704 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3706 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3708 state->interface == PHY_INTERFACE_MODE_2500BASEX ?
3709 SPEED_2500 : SPEED_1000;
3710 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3711 state->speed = SPEED_100;
3713 state->speed = SPEED_10;
3715 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3716 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3717 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3720 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3721 state->pause |= MLO_PAUSE_RX;
3722 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3723 state->pause |= MLO_PAUSE_TX;
3726 static void mvneta_mac_an_restart(struct phylink_config *config)
3728 struct net_device *ndev = to_net_dev(config->dev);
3729 struct mvneta_port *pp = netdev_priv(ndev);
3730 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3732 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3733 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3734 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3735 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3738 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
3739 const struct phylink_link_state *state)
3741 struct net_device *ndev = to_net_dev(config->dev);
3742 struct mvneta_port *pp = netdev_priv(ndev);
3743 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3744 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3745 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
3746 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3747 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3749 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3750 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3751 MVNETA_GMAC2_PORT_RESET);
3752 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
3753 new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3754 new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3755 MVNETA_GMAC_INBAND_RESTART_AN |
3756 MVNETA_GMAC_CONFIG_MII_SPEED |
3757 MVNETA_GMAC_CONFIG_GMII_SPEED |
3758 MVNETA_GMAC_AN_SPEED_EN |
3759 MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3760 MVNETA_GMAC_CONFIG_FLOW_CTRL |
3761 MVNETA_GMAC_AN_FLOW_CTRL_EN |
3762 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
3763 MVNETA_GMAC_AN_DUPLEX_EN);
3765 /* Even though it might look weird, when we're configured in
3766 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3768 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3770 if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3771 state->interface == PHY_INTERFACE_MODE_SGMII ||
3772 phy_interface_mode_is_8023z(state->interface))
3773 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3775 if (phylink_test(state->advertising, Pause))
3776 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3777 if (state->pause & MLO_PAUSE_TXRX_MASK)
3778 new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
3780 if (!phylink_autoneg_inband(mode)) {
3781 /* Phy or fixed speed */
3783 new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3785 if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
3786 new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3787 else if (state->speed == SPEED_100)
3788 new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
3789 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
3790 /* SGMII mode receives the state from the PHY */
3791 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3792 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3793 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3794 MVNETA_GMAC_FORCE_LINK_PASS)) |
3795 MVNETA_GMAC_INBAND_AN_ENABLE |
3796 MVNETA_GMAC_AN_SPEED_EN |
3797 MVNETA_GMAC_AN_DUPLEX_EN;
3799 /* 802.3z negotiation - only 1000base-X */
3800 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3801 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3802 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3803 MVNETA_GMAC_FORCE_LINK_PASS)) |
3804 MVNETA_GMAC_INBAND_AN_ENABLE |
3805 MVNETA_GMAC_CONFIG_GMII_SPEED |
3806 /* The MAC only supports FD mode */
3807 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3809 if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3810 new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
3813 /* Armada 370 documentation says we can only change the port mode
3814 * and in-band enable when the link is down, so force it down
3815 * while making these changes. We also do this for GMAC_CTRL2 */
3816 if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3817 (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
3818 (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
3819 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3820 (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
3821 MVNETA_GMAC_FORCE_LINK_DOWN);
3825 /* When at 2.5G, the link partner can send frames with shortened preambles. */
3828 if (state->speed == SPEED_2500)
3829 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
3831 if (pp->comphy && pp->phy_interface != state->interface &&
3832 (state->interface == PHY_INTERFACE_MODE_SGMII ||
3833 state->interface == PHY_INTERFACE_MODE_1000BASEX ||
3834 state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
3835 pp->phy_interface = state->interface;
3837 WARN_ON(phy_power_off(pp->comphy));
3838 WARN_ON(mvneta_comphy_init(pp));
3841 if (new_ctrl0 != gmac_ctrl0)
3842 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
3843 if (new_ctrl2 != gmac_ctrl2)
3844 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
3845 if (new_ctrl4 != gmac_ctrl4)
3846 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
3847 if (new_clk != gmac_clk)
3848 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
3849 if (new_an != gmac_an)
3850 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
3852 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
3853 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3854 MVNETA_GMAC2_PORT_RESET) != 0)
3859 static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
3863 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
3865 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
3867 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
3868 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
3871 static void mvneta_mac_link_down(struct phylink_config *config,
3872 unsigned int mode, phy_interface_t interface)
3874 struct net_device *ndev = to_net_dev(config->dev);
3875 struct mvneta_port *pp = netdev_priv(ndev);
3878 mvneta_port_down(pp);
3880 if (!phylink_autoneg_inband(mode)) {
3881 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3882 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3883 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3884 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3887 pp->eee_active = false;
3888 mvneta_set_eee(pp, false);
3891 static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
3892 phy_interface_t interface,
3893 struct phy_device *phy)
3895 struct net_device *ndev = to_net_dev(config->dev);
3896 struct mvneta_port *pp = netdev_priv(ndev);
3899 if (!phylink_autoneg_inband(mode)) {
3900 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3901 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3902 val |= MVNETA_GMAC_FORCE_LINK_PASS;
3903 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3908 if (phy && pp->eee_enabled) {
3909 pp->eee_active = phy_init_eee(phy, 0) >= 0;
3910 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
3914 static const struct phylink_mac_ops mvneta_phylink_ops = {
3915 .validate = mvneta_validate,
3916 .mac_pcs_get_state = mvneta_mac_pcs_get_state,
3917 .mac_an_restart = mvneta_mac_an_restart,
3918 .mac_config = mvneta_mac_config,
3919 .mac_link_down = mvneta_mac_link_down,
3920 .mac_link_up = mvneta_mac_link_up,
3923 static int mvneta_mdio_probe(struct mvneta_port *pp)
3925 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3926 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
3929 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
3931 phylink_ethtool_get_wol(pp->phylink, &wol);
3932 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3937 static void mvneta_mdio_remove(struct mvneta_port *pp)
3939 phylink_disconnect_phy(pp->phylink);
3942 /* Electing a CPU must be done in an atomic way: it should be done
3943 * after or before the removal/insertion of a CPU, and this function is not reentrant. */
3946 static void mvneta_percpu_elect(struct mvneta_port *pp)
3948 int elected_cpu = 0, max_cpu, cpu, i = 0;
3950 /* Use the CPU associated with the default RXQ when it is online; in
3951 * all other cases, use CPU 0, which can't be offline.
3953 if (cpu_online(pp->rxq_def))
3954 elected_cpu = pp->rxq_def;
3956 max_cpu = num_present_cpus();
3958 for_each_online_cpu(cpu) {
3959 int rxq_map = 0, txq_map = 0;
3962 for (rxq = 0; rxq < rxq_number; rxq++)
3963 if ((rxq % max_cpu) == cpu)
3964 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3966 if (cpu == elected_cpu)
3967 /* Map the default receive queue to the elected CPU */
3970 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3972 /* We update the TX queue map only if we have one
3973 * queue. In this case we associate the TX queue with
3974 * the CPU bound to the default RX queue.
3976 if (txq_number == 1)
3977 txq_map = (cpu == elected_cpu) ?
3978 MVNETA_CPU_TXQ_ACCESS(1) : 0;
3980 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3981 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3983 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3985 /* Update the interrupt mask on each CPU according to the new mapping */
3988 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
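/* Illustrative sketch, not part of the driver: the RXQ-to-CPU spread the
 * election loop above computes.  With 8 RX queues and 4 online CPUs,
 * "rxq % max_cpu == cpu" gives CPU0 -> RXQs 0,4; CPU1 -> 1,5; CPU2 -> 2,6;
 * CPU3 -> 3,7, and the elected CPU additionally gets the default RXQ.
 * The plain bit shift below stands in for MVNETA_CPU_RXQ_ACCESS(rxq).
 */
static unsigned int mvneta_cpu_rxq_map_sketch(int cpu, int max_cpu, int nrxqs)
{
	unsigned int rxq_map = 0;
	int rxq;

	for (rxq = 0; rxq < nrxqs; rxq++)
		if ((rxq % max_cpu) == cpu)
			rxq_map |= 1 << rxq;

	return rxq_map;
}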
3995 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3998 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4000 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4003 spin_lock(&pp->lock);
4005 * Configuring the driver for a new CPU while the driver is
4006 * stopping is racy, so just avoid it.
4008 if (pp->is_stopped) {
4009 spin_unlock(&pp->lock);
4012 netif_tx_stop_all_queues(pp->dev);
4015 * We have to synchronise on the napi of each CPU except the one
4016 * just being woken up
4018 for_each_online_cpu(other_cpu) {
4019 if (other_cpu != cpu) {
4020 struct mvneta_pcpu_port *other_port =
4021 per_cpu_ptr(pp->ports, other_cpu);
4023 napi_synchronize(&other_port->napi);
4027 /* Mask all ethernet port interrupts */
4028 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4029 napi_enable(&port->napi);
4032 * Enable per-CPU interrupts on the CPU that is brought up.
4035 mvneta_percpu_enable(pp);
4038 * Enable per-CPU interrupt on the one CPU we care about.
4041 mvneta_percpu_elect(pp);
4043 /* Unmask all ethernet port interrupts */
4044 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4045 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4046 MVNETA_CAUSE_PHY_STATUS_CHANGE |
4047 MVNETA_CAUSE_LINK_CHANGE);
4048 netif_tx_start_all_queues(pp->dev);
4049 spin_unlock(&pp->lock);
4053 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
4055 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4057 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4060 * Thanks to this lock we are sure that any pending CPU election is done.
4063 spin_lock(&pp->lock);
4064 /* Mask all ethernet port interrupts */
4065 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4066 spin_unlock(&pp->lock);
4068 napi_synchronize(&port->napi);
4069 napi_disable(&port->napi);
4070 /* Disable per-CPU interrupts on the CPU that is brought down. */
4071 mvneta_percpu_disable(pp);
4075 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
4077 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4080 /* Check if a new CPU must be elected now that this one is down */
4081 spin_lock(&pp->lock);
4082 mvneta_percpu_elect(pp);
4083 spin_unlock(&pp->lock);
4084 /* Unmask all ethernet port interrupts */
4085 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4086 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4087 MVNETA_CAUSE_PHY_STATUS_CHANGE |
4088 MVNETA_CAUSE_LINK_CHANGE);
4089 netif_tx_start_all_queues(pp->dev);
4093 static int mvneta_open(struct net_device *dev)
4095 struct mvneta_port *pp = netdev_priv(dev);
4098 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4100 ret = mvneta_setup_rxqs(pp);
4104 ret = mvneta_setup_txqs(pp);
4106 goto err_cleanup_rxqs;
4108 /* Connect to port interrupt line */
4109 if (pp->neta_armada3700)
4110 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4113 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4114 dev->name, pp->ports);
4116 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4117 goto err_cleanup_txqs;
4120 if (!pp->neta_armada3700) {
4121 /* Enable per-CPU interrupts on all the CPUs to handle our RX queue interrupts */
4124 on_each_cpu(mvneta_percpu_enable, pp, true);
4126 pp->is_stopped = false;
4127 /* Register a CPU notifier to handle the case where our CPU
4128 * might be taken offline.
4130 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
4135 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4138 goto err_free_online_hp;
4141 ret = mvneta_mdio_probe(pp);
4143 netdev_err(dev, "cannot probe MDIO bus\n");
4144 goto err_free_dead_hp;
4147 mvneta_start_dev(pp);
4152 if (!pp->neta_armada3700)
4153 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4156 if (!pp->neta_armada3700)
4157 cpuhp_state_remove_instance_nocalls(online_hpstate,
4160 if (pp->neta_armada3700) {
4161 free_irq(pp->dev->irq, pp);
4163 on_each_cpu(mvneta_percpu_disable, pp, true);
4164 free_percpu_irq(pp->dev->irq, pp->ports);
4167 mvneta_cleanup_txqs(pp);
4169 mvneta_cleanup_rxqs(pp);
4173 /* Stop the port, free port interrupt line */
4174 static int mvneta_stop(struct net_device *dev)
4176 struct mvneta_port *pp = netdev_priv(dev);
4178 if (!pp->neta_armada3700) {
4179 /* Inform that we are stopping so we don't want to set up the
4180 * driver for new CPUs in the notifiers. The code of the
4181 * notifier for CPU online is protected by the same spinlock,
4182 * so when we get the lock, the notifier work is done.
4184 spin_lock(&pp->lock);
4185 pp->is_stopped = true;
4186 spin_unlock(&pp->lock);
4188 mvneta_stop_dev(pp);
4189 mvneta_mdio_remove(pp);
4191 cpuhp_state_remove_instance_nocalls(online_hpstate,
4193 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4195 on_each_cpu(mvneta_percpu_disable, pp, true);
4196 free_percpu_irq(dev->irq, pp->ports);
4198 mvneta_stop_dev(pp);
4199 mvneta_mdio_remove(pp);
4200 free_irq(dev->irq, pp);
4203 mvneta_cleanup_rxqs(pp);
4204 mvneta_cleanup_txqs(pp);
4209 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4211 struct mvneta_port *pp = netdev_priv(dev);
4213 return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4216 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
4217 struct netlink_ext_ack *extack)
4219 bool need_update, running = netif_running(dev);
4220 struct mvneta_port *pp = netdev_priv(dev);
4221 struct bpf_prog *old_prog;
4223 if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
4224 NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
4228 need_update = !!pp->xdp_prog != !!prog;
4229 if (running && need_update)
4232 old_prog = xchg(&pp->xdp_prog, prog);
4234 bpf_prog_put(old_prog);
4236 if (running && need_update)
4237 return mvneta_open(dev);
4242 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4244 struct mvneta_port *pp = netdev_priv(dev);
4246 switch (xdp->command) {
4247 case XDP_SETUP_PROG:
4248 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
4249 case XDP_QUERY_PROG:
4250 xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0;
4257 /* Ethtool methods */
4259 /* Set link ksettings (phy address, speed) for ethtools */
4261 mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
4262 const struct ethtool_link_ksettings *cmd)
4264 struct mvneta_port *pp = netdev_priv(ndev);
4266 return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4269 /* Get link ksettings for ethtools */
4271 mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
4272 struct ethtool_link_ksettings *cmd)
4274 struct mvneta_port *pp = netdev_priv(ndev);
4276 return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4279 static int mvneta_ethtool_nway_reset(struct net_device *dev)
4281 struct mvneta_port *pp = netdev_priv(dev);
4283 return phylink_ethtool_nway_reset(pp->phylink);
4286 /* Set interrupt coalescing for ethtools */
4287 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
4288 struct ethtool_coalesce *c)
4290 struct mvneta_port *pp = netdev_priv(dev);
4293 for (queue = 0; queue < rxq_number; queue++) {
4294 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4295 rxq->time_coal = c->rx_coalesce_usecs;
4296 rxq->pkts_coal = c->rx_max_coalesced_frames;
4297 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4298 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4301 for (queue = 0; queue < txq_number; queue++) {
4302 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4303 txq->done_pkts_coal = c->tx_max_coalesced_frames;
4304 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4310 /* get coalescing for ethtools */
4311 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
4312 struct ethtool_coalesce *c)
4314 struct mvneta_port *pp = netdev_priv(dev);
4316 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
4317 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
4319 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4324 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4325 struct ethtool_drvinfo *drvinfo)
4327 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
4328 sizeof(drvinfo->driver));
4329 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
4330 sizeof(drvinfo->version));
4331 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4332 sizeof(drvinfo->bus_info));
4336 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
4337 struct ethtool_ringparam *ring)
4339 struct mvneta_port *pp = netdev_priv(netdev);
4341 ring->rx_max_pending = MVNETA_MAX_RXD;
4342 ring->tx_max_pending = MVNETA_MAX_TXD;
4343 ring->rx_pending = pp->rx_ring_size;
4344 ring->tx_pending = pp->tx_ring_size;
4347 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
4348 struct ethtool_ringparam *ring)
4350 struct mvneta_port *pp = netdev_priv(dev);
4352 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4354 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4355 ring->rx_pending : MVNETA_MAX_RXD;
4357 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4358 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4359 if (pp->tx_ring_size != ring->tx_pending)
4360 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4361 pp->tx_ring_size, ring->tx_pending);
4363 if (netif_running(dev)) {
4365 if (mvneta_open(dev)) {
4367 "error on opening device after ring param change\n");
static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(pp->phylink, pause);
}

static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
}
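/* Export the statistics counter names for "ethtool -S". */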
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;
		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}
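/* Fold the hardware MIB counters and software counters into
 * pp->ethtool_stats[]; the MIB registers clear on read, so running
 * totals are maintained in software.
 */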
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low;
	u64 val;
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val = (u64)high << 32 | low;
			break;
		case T_SW:
			switch (s->offset) {
			case ETHTOOL_STAT_EEE_WAKEUP:
				val = phylink_get_eee_err(pp->phylink);
				break;
			case ETHTOOL_STAT_SKB_ALLOC_ERR:
				val = pp->rxqs[0].skb_alloc_err;
				break;
			case ETHTOOL_STAT_REFILL_ERR:
				val = pp->rxqs[0].refill_err;
				break;
			}
			break;
		}
		pp->ethtool_stats[i] += val;
	}
}
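/* "ethtool -S" entry point: refresh the accumulated counters and copy
 * them out to user space.
 */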
static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}
static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}
static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
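/* Apply a new RSS configuration: quiesce NAPI on every CPU, retarget
 * the default RX queue from the indirection table, re-elect the CPU
 * that services it, then restart the datapath.
 */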
static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
		}
	} else {
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&pcpu_port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}
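/* "ethtool -X" handler: only the indirection table can be changed; a
 * hash key or a hash function other than Toeplitz is rejected. The port
 * only honours entry 0 of the table, which becomes the default RX queue.
 */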
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}
static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_wol(pp->phylink, wol);
}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
	return ret;
}
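/* Report EEE state; the LPI timer is read back from the LPI_CTRL_0
 * register, the rest comes from the cached software state and phylink.
 */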
static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);

	eee->eee_enabled = pp->eee_enabled;
	eee->eee_active = pp->eee_active;
	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
	eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;

	return phylink_ethtool_get_eee(pp->phylink, eee);
}

static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* The Armada 37x documents do not give limits for this other than
	 * it being an 8-bit register.
	 */
	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open = mvneta_open,
	.ndo_stop = mvneta_stop,
	.ndo_start_xmit = mvneta_tx,
	.ndo_set_rx_mode = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu = mvneta_change_mtu,
	.ndo_fix_features = mvneta_fix_features,
	.ndo_get_stats64 = mvneta_get_stats64,
	.ndo_do_ioctl = mvneta_ioctl,
	.ndo_bpf = mvneta_xdp,
	.ndo_xdp_xmit = mvneta_xdp_xmit,
};

static const struct ethtool_ops mvneta_eth_tool_ops = {
	.nway_reset = mvneta_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.set_coalesce = mvneta_ethtool_set_coalesce,
	.get_coalesce = mvneta_ethtool_get_coalesce,
	.get_drvinfo = mvneta_ethtool_get_drvinfo,
	.get_ringparam = mvneta_ethtool_get_ringparam,
	.set_ringparam = mvneta_ethtool_set_ringparam,
	.get_pauseparam = mvneta_ethtool_get_pauseparam,
	.set_pauseparam = mvneta_ethtool_set_pauseparam,
	.get_strings = mvneta_ethtool_get_strings,
	.get_ethtool_stats = mvneta_ethtool_get_stats,
	.get_sset_count = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc = mvneta_ethtool_get_rxnfc,
	.get_rxfh = mvneta_ethtool_get_rxfh,
	.set_rxfh = mvneta_ethtool_set_rxfh,
	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
	.get_wol = mvneta_ethtool_get_wol,
	.set_wol = mvneta_ethtool_set_wol,
	.get_eee = mvneta_ethtool_get_eee,
	.set_eee = mvneta_ethtool_set_eee,
};
/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
		 phy_interface_mode_is_8023z(phy_mode))
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
	else if (!phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	struct phy *comphy;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	phy_interface_t phy_mode;
	const char *mac_from;
	int tx_csum_limit;
	int err;
	int cpu;

	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
				      txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0)
		return -EINVAL;

	err = of_get_phy_mode(dn, &phy_mode);
	if (err) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		goto err_free_irq;
	}

	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
	if (comphy == ERR_PTR(-EPROBE_DEFER)) {
		err = -EPROBE_DEFER;
		goto err_free_irq;
	} else if (IS_ERR(comphy)) {
		comphy = NULL;
	}
	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);

	pp->phylink_config.dev = &dev->dev;
	pp->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
				 phy_mode, &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp->phylink = phylink;
	pp->comphy = comphy;
	pp->phy_interface = phy_mode;

	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;
	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	pp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}
	dt_mac_addr = of_get_mac_address(dn);
	if (!IS_ERR(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;
	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting default configuration of Mbus
	 * windows, however without using filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;
	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms, whose
		 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);
	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;
err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
	return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	mvneta_stop_dev(pp);

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}
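/* Reverse of mvneta_suspend(): re-enable clocks, reprogram the MBus
 * windows and port defaults, then rebuild the RX/TX queues if the
 * interface was running before suspend.
 */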
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	mvneta_start_dev(pp);
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};
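/* Register the CPU hotplug callbacks used for per-CPU RX queue and NAPI
 * management before registering the platform driver itself.
 */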
static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		return ret;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
	return ret;
}
module_init(mvneta_driver_init);
static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);