/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TXQ_THRESH_OFFSET			16
#define MVPP2_TXQ_THRESH_MASK			0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)		(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)		(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)		(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE		0x4060
/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port)	(0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD		0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port)		(0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_CTRL_4_REG			0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL		BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL			BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS		BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_CTRL0_PORT_EN		BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS		BIT(1)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS		BIT(14)

#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define MVPP22_SMI_POLLING_EN			BIT(10)
#define MVPP22_GMAC_BASE(port)		(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
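/* For instance, with a 128-entry ring (last_desc = 127), advancing from
 * index 127 wraps back to 0, so the ring is walked circularly.
 */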
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_TXDONE_COAL_USEC		1000
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())
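/* i.e. the MTU plus the 2-byte Marvell header, one VLAN tag, the Ethernet
 * header and the FCS, rounded up to a whole cache line.
 */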
#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16
/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)	\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
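/* The parser TCAM interleaves data bytes with their per-byte enable masks:
 * data byte N sits at 4 * (N / 2) + (N % 2) and its enable byte two bytes
 * further on, so data offsets 0, 1, 2, 3 map to bytes 0, 1, 4, 5 with
 * enables at 2, 3, 6, 7.
 */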
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
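/* i.e. 512 bytes minus NET_SKB_PAD and MVPP2_SKB_SHINFO_SIZE, per the
 * MVPP2_RX_MAX_PKT_SIZE() definition above.
 */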
#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_THREADS		8
#define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	int sw_thread_id;
	u16 sw_thread_mask;
	int first_rxq;
	int nrxqs;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)
#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;
	int frag_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP22_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG   (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG     (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG     (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG    (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG      (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG   (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}
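/* On PPv2.2 the 64-bit descriptor fields hold a 41-bit DMA address in bits
 * [40:0]; going by the field names, the remaining high bits carry PTP or
 * key-hash state, hence the GENMASK_ULL(40, 0) masking in the accessors
 * below.
 */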
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}
static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
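/* Together with mvpp2_txq_inc_get() above, the wrapping put/get indices
 * implement a per-CPU circular buffer of in-flight TX buffers.
 */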
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}
/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
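/* For example, port 0, txq 3 maps to physical TXQ
 * (MVPP2_MAX_TCONT + 0) * MVPP2_MAX_TXQ + 3 = 131, the first
 * MVPP2_MAX_TCONT queue ranges being reserved for the PON T-CONTs.
 */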
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
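/* Note the inverted convention shared by the two helpers above: a cleared
 * bit in the port enable byte means the port participates in the lookup,
 * which is why the map is complemented on both set and get.
 */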
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
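/* The result info value occupies SRAM bits [31:0] (word 0) and its per-bit
 * update control bits sit at [63:32], per the MVPP2_PRS_SRAM_RI_*_OFFS
 * definitions, so reading word 0 yields the whole 32-bit "ri" value.
 */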
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
	     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	       (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create a new one */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);

	return ret;
}
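
/* The parser takes the lowest-index TCAM hit, so double vlan entries must
 * keep lower indices than single/triple ones for a QinQ packet to match
 * the more specific rule. That is why the function above allocates from
 * the end of the free range and bails out if the new tid would not stay
 * above the last double vlan entry, while mvpp2_prs_double_vlan_add()
 * below performs the mirror check.
 */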
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}
/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);

error:
	kfree(pe);
	return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
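
/* These IP4 entries are reached 4 bytes into the IPv4 header (the L2
 * etype entry already skipped ethertype + 4 bytes). Hence tcam data
 * byte 5 corresponds to header offset 9, the protocol field, and the
 * L4 offset of sizeof(struct iphdr) - 4 = 16 points right past a
 * 20-byte option-less header. The 12-byte shift lands the lookup on
 * the destination address (header offset 16) for the second IPv4 pass
 * that classifies unicast/multicast/broadcast.
 */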
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
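
/* The cast entries run on that second IPv4 pass, positioned at the
 * destination address: a first DIP byte matching the class-D prefix
 * (224.0.0.0/4) flags multicast, while all four DIP bytes forced to
 * 0xff flag the limited broadcast address 255.255.255.255.
 */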
/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Continue - go back to the IPv6 lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
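
/* The IPv6 lookup is entered at the start of the destination address
 * (header offset 24 = 8 fixed bytes + 16-byte source address), so the
 * -18 shift above moves the position back to the Next Header field at
 * offset 6 before the IPv6 lookup runs again.
 */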
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
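
/* Example: mvpp2_prs_default_init() below calls this for every port as
 * mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 * MVPP2_PRS_PORT_LU_MAX, 0), i.e. parsing always starts with the
 * Marvell header lookup at a zero initial offset.
 */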
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
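
/* Note that the "IPv4 with options" entry above is derived from the
 * option-less entry still held in pe: only the tid, the IHL match byte
 * and the result info words are rewritten, while the ETH_P_IP match,
 * shifts and offsets are inherited unchanged.
 */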
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88a8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
					      MVPP2_PRS_DBL_VLANS_MAX,
					      sizeof(bool), GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
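
/* The two fixed entries close the VLAN stage: a packet whose ai has the
 * double vlan bit set but matched no specific tpid pair is still marked
 * MVPP2_PRS_RI_VLAN_DOUBLE by MVPP2_PE_VLAN_DBL, and anything else
 * falls through to MVPP2_PE_VLAN_NONE.
 */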
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 as the last header: a case similar to 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all the entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all the entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
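
/* The same function handles both directions: removing the last port from
 * an entry's port map invalidates the whole entry, which is how
 * mvpp2_prs_mcast_del_all() below drops the per-port multicast filters.
 */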
static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}
/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}
static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such an entry doesn't exist - create one */
	if (!pe) {
		/* Go through all the entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}
/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
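
/* Each buffer pointer holds a DMA address plus a software cookie, hence
 * two 32-bit words per buffer on PPv2.1 and two 64-bit words on PPv2.2;
 * for example, 1024 buffer pointers take 8 KB of coherent memory on
 * PPv2.1 and 16 KB on PPv2.2.
 */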
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = get_cpu();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
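
/* On PPv2.2 both addresses can exceed 32 bits: the high bits are read
 * back from MVPP22_BM_ADDR_HIGH_ALLOC and are only folded in when the
 * kernel's dma_addr_t/phys_addr_t types are actually 64-bit wide.
 */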
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
3947 /* Release buffer to BM */
3948 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3949 dma_addr_t buf_dma_addr,
3950 phys_addr_t buf_phys_addr)
3952 int cpu = get_cpu();
3954 if (port->priv->hw_version == MVPP22) {
3957 if (sizeof(dma_addr_t) == 8)
3958 val |= upper_32_bits(buf_dma_addr) &
3959 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3961 if (sizeof(phys_addr_t) == 8)
3962 val |= (upper_32_bits(buf_phys_addr)
3963 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3964 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3966 mvpp2_percpu_write(port->priv, cpu,
3967 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
3970 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3971 * returned in the "cookie" field of the RX
3972 * descriptor. Instead of storing the virtual address, we
3973 * store the physical address
3975 mvpp2_percpu_write(port->priv, cpu,
3976 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3977 mvpp2_percpu_write(port->priv, cpu,
3978 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
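/*
 * Editor's note: an illustrative sketch (not part of the original driver)
 * of how mvpp2_bm_pool_put() splits a wide DMA address on PPv2.2,
 * assuming a hypothetical 40-bit buffer address and that the high-bits
 * mask keeps the low byte:
 */
#if 0
	dma_addr_t dma = 0x1234567890ULL;	/* hypothetical 40-bit address */
	u32 lo = lower_32_bits(dma);		/* 0x34567890 -> MVPP2_BM_PHY_RLS_REG */
	u32 hi = upper_32_bits(dma) &
		 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;	/* 0x12 -> high-bits register */
#endif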
3983 /* Allocate buffers for the pool */
3984 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3985 struct mvpp2_bm_pool *bm_pool, int buf_num)
3987 int i, buf_size, total_size;
3988 dma_addr_t dma_addr;
3989 phys_addr_t phys_addr;
3992 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3993 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3996 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3997 netdev_err(port->dev,
3998 "cannot allocate %d buffers for pool %d\n",
3999 buf_num, bm_pool->id);
4003 for (i = 0; i < buf_num; i++) {
4004 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4005 &phys_addr, GFP_KERNEL);
4009 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
4013 /* Update BM driver with number of buffers added to pool */
4014 bm_pool->buf_num += i;
4016 netdev_dbg(port->dev,
4017 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4018 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4019 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4021 netdev_dbg(port->dev,
4022 "%s pool %d: %d of %d buffers added\n",
4023 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4024 bm_pool->id, i, buf_num);
4028 /* Notify the driver that the BM pool is being used as a specific type and return the
4029 * pool pointer on success
4031 static struct mvpp2_bm_pool *
4032 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4035 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4038 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4039 netdev_err(port->dev, "mixing pool types is forbidden\n");
4043 if (new_pool->type == MVPP2_BM_FREE)
4044 new_pool->type = type;
4046 /* Allocate buffers in case BM pool is used as long pool, but packet
4047 * size doesn't match the MTU, or the BM pool hasn't been used yet
4049 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4050 (new_pool->pkt_size == 0)) {
4053 /* Set default buffer number or free all the buffers in case
4054 * the pool is not empty
4056 pkts_num = new_pool->buf_num;
4058 pkts_num = type == MVPP2_BM_SWF_LONG ?
4059 MVPP2_BM_LONG_BUF_NUM :
4060 MVPP2_BM_SHORT_BUF_NUM;
4062 mvpp2_bm_bufs_free(port->dev->dev.parent,
4063 port->priv, new_pool);
4065 new_pool->pkt_size = pkt_size;
4066 new_pool->frag_size =
4067 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4068 MVPP2_SKB_SHINFO_SIZE;
4070 /* Allocate buffers for this pool */
4071 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4072 if (num != pkts_num) {
4073 WARN(1, "pool %d: %d of %d allocated\n",
4074 new_pool->id, num, pkts_num);
4079 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4080 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4085 /* Initialize pools for software forwarding (swf) */
4086 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4090 if (!port->pool_long) {
4092 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4095 if (!port->pool_long)
4098 port->pool_long->port_map |= (1 << port->id);
4100 for (rxq = 0; rxq < port->nrxqs; rxq++)
4101 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4104 if (!port->pool_short) {
4106 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4108 MVPP2_BM_SHORT_PKT_SIZE);
4109 if (!port->pool_short)
4112 port->pool_short->port_map |= (1 << port->id);
4114 for (rxq = 0; rxq < port->nrxqs; rxq++)
4115 mvpp2_rxq_short_pool_set(port, rxq,
4116 port->pool_short->id);
4122 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4124 struct mvpp2_port *port = netdev_priv(dev);
4125 struct mvpp2_bm_pool *port_pool = port->pool_long;
4126 int num, pkts_num = port_pool->buf_num;
4127 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4129 /* Update BM pool with new buffer size */
4130 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
4131 if (port_pool->buf_num) {
4132 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4136 port_pool->pkt_size = pkt_size;
4137 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4138 MVPP2_SKB_SHINFO_SIZE;
4139 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4140 if (num != pkts_num) {
4141 WARN(1, "pool %d: %d of %d allocated\n",
4142 port_pool->id, num, pkts_num);
4146 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4147 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4149 netdev_update_features(dev);
4153 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4155 int i, sw_thread_mask = 0;
4157 for (i = 0; i < port->nqvecs; i++)
4158 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4160 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4161 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
4164 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4166 int i, sw_thread_mask = 0;
4168 for (i = 0; i < port->nqvecs; i++)
4169 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4171 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4172 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4175 static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4177 struct mvpp2_port *port = qvec->port;
4179 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4180 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4183 static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4185 struct mvpp2_port *port = qvec->port;
4187 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4188 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
4191 /* Mask the current CPU's Rx/Tx interrupts.
4192 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4193 * using smp_processor_id() is OK.
4195 static void mvpp2_interrupts_mask(void *arg)
4197 struct mvpp2_port *port = arg;
4199 mvpp2_percpu_write(port->priv, smp_processor_id(),
4200 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
4203 /* Unmask the current CPU's Rx/Tx interrupts.
4204 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4205 * using smp_processor_id() is OK.
4207 static void mvpp2_interrupts_unmask(void *arg)
4209 struct mvpp2_port *port = arg;
4212 val = MVPP2_CAUSE_MISC_SUM_MASK |
4213 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4214 if (port->has_tx_irqs)
4215 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4217 mvpp2_percpu_write(port->priv, smp_processor_id(),
4218 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4222 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4227 if (port->priv->hw_version != MVPP22)
4233 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4235 for (i = 0; i < port->nqvecs; i++) {
4236 struct mvpp2_queue_vector *v = port->qvecs + i;
4238 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4241 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4242 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4246 /* Port configuration routines */
4248 static void mvpp22_port_mii_set(struct mvpp2_port *port)
4252 /* Only GOP port 0 has an XLG MAC */
4253 if (port->gop_id == 0) {
4254 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4255 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4257 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4258 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4259 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4261 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4263 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4266 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4267 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
4268 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4270 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4271 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4272 val |= MVPP22_CTRL4_SYNC_BYPASS;
4273 val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4274 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4277 static void mvpp2_port_mii_set(struct mvpp2_port *port)
4281 if (port->priv->hw_version == MVPP22)
4282 mvpp22_port_mii_set(port);
4284 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4286 switch (port->phy_interface) {
4287 case PHY_INTERFACE_MODE_SGMII:
4288 val |= MVPP2_GMAC_INBAND_AN_MASK;
4290 case PHY_INTERFACE_MODE_RGMII:
4291 val |= MVPP2_GMAC_PORT_RGMII_MASK;
4293 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4296 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4299 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4303 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4304 val |= MVPP2_GMAC_FC_ADV_EN;
4305 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4308 static void mvpp2_port_enable(struct mvpp2_port *port)
4312 /* Only GOP port 0 has an XLG MAC */
4313 if (port->gop_id == 0 &&
4314 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4315 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4316 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4317 val |= MVPP22_XLG_CTRL0_PORT_EN |
4318 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4319 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
4320 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4322 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4323 val |= MVPP2_GMAC_PORT_EN_MASK;
4324 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4325 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4329 static void mvpp2_port_disable(struct mvpp2_port *port)
4333 /* Only GOP port 0 has an XLG MAC */
4334 if (port->gop_id == 0 &&
4335 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4336 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4337 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4338 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
4339 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
4340 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4342 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4343 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4344 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4348 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
4349 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4353 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4354 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4355 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4358 /* Configure loopback port */
4359 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4363 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4365 if (port->speed == 1000)
4366 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4368 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4370 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4371 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4373 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4375 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4378 static void mvpp2_port_reset(struct mvpp2_port *port)
4382 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4383 ~MVPP2_GMAC_PORT_RESET_MASK;
4384 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4386 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4387 MVPP2_GMAC_PORT_RESET_MASK)
4391 /* Change maximum receive size of the port */
4392 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4396 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4397 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
4398 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4399 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4400 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
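/*
 * Editor's worked example (assumption: MVPP2_MH_SIZE == 2, the Marvell
 * header size): for pkt_size == 1518 the field written above is
 * (1518 - 2) / 2 == 758, i.e. the GMAC stores the maximum Rx size in
 * units of two bytes.
 */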
4403 /* Set defaults to the MVPP2 port */
4404 static void mvpp2_defaults_set(struct mvpp2_port *port)
4406 int tx_port_num, val, queue, ptxq, lrxq;
4408 if (port->priv->hw_version == MVPP21) {
4409 /* Configure port to loopback if needed */
4410 if (port->flags & MVPP2_F_LOOPBACK)
4411 mvpp2_port_loopback_set(port);
4413 /* Update TX FIFO MIN Threshold */
4414 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4415 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4416 /* Min. TX threshold must be less than minimal packet length */
4417 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4418 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4421 /* Disable Legacy WRR, Disable EJP, Release from reset */
4422 tx_port_num = mvpp2_egress_port(port);
4423 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4425 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4427 /* Close bandwidth for all queues */
4428 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4429 ptxq = mvpp2_txq_phys(port->id, queue);
4430 mvpp2_write(port->priv,
4431 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4434 /* Set refill period to 1 usec, refill tokens
4435 * and bucket size to maximum
4437 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4438 port->priv->tclk / USEC_PER_SEC);
4439 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4440 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4441 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4442 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4443 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4444 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4445 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4447 /* Set MaximumLowLatencyPacketSize value to 256 */
4448 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4449 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4450 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4452 /* Enable Rx cache snoop */
4453 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
4454 queue = port->rxqs[lrxq]->id;
4455 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4456 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4457 MVPP2_SNOOP_BUF_HDR_MASK;
4458 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4461 /* By default, mask all interrupts on all present CPUs */
4462 mvpp2_interrupts_disable(port);
4465 /* Enable/disable receiving packets */
4466 static void mvpp2_ingress_enable(struct mvpp2_port *port)
4471 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
4472 queue = port->rxqs[lrxq]->id;
4473 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4474 val &= ~MVPP2_RXQ_DISABLE_MASK;
4475 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4479 static void mvpp2_ingress_disable(struct mvpp2_port *port)
4484 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
4485 queue = port->rxqs[lrxq]->id;
4486 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4487 val |= MVPP2_RXQ_DISABLE_MASK;
4488 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4492 /* Enable transmit via physical egress queue
4493  * - HW starts taking descriptors from DRAM
4495 static void mvpp2_egress_enable(struct mvpp2_port *port)
4499 int tx_port_num = mvpp2_egress_port(port);
4501 /* Enable all initialized TXs. */
4503 for (queue = 0; queue < port->ntxqs; queue++) {
4504 struct mvpp2_tx_queue *txq = port->txqs[queue];
4507 qmap |= (1 << queue);
4510 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4511 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4514 /* Disable transmit via physical egress queue
4515 * - HW doesn't take descriptors from DRAM
4517 static void mvpp2_egress_disable(struct mvpp2_port *port)
4521 int tx_port_num = mvpp2_egress_port(port);
4523 /* Issue stop command for active channels only */
4524 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4525 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4526 MVPP2_TXP_SCHED_ENQ_MASK;
4528 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4529 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4531 /* Wait for all Tx activity to terminate. */
4534 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4535 netdev_warn(port->dev,
4536 "Tx stop timed out, status=0x%08x\n",
4543 /* Check in the port TX Command register that all
4544 * Tx queues are stopped
4546 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4547 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4550 /* Rx descriptors helper methods */
4552 /* Get number of Rx descriptors occupied by received packets */
4554 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4556 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4558 return val & MVPP2_RXQ_OCCUPIED_MASK;
4561 /* Update Rx queue status with the number of occupied and available
4562 * Rx descriptor slots.
4565 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4566 int used_count, int free_count)
4568 /* Decrement the number of used descriptors and increment the
4569  * number of free descriptors.
4571 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4573 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
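/*
 * Editor's worked example (assumption: MVPP2_RXQ_NUM_NEW_OFFSET == 16):
 * refilling 32 processed descriptors packs used_count into the low half
 * and free_count into the high half, 32 | (32 << 16) == 0x00200020, so a
 * single register write updates both counters.
 */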
4576 /* Get pointer to next RX descriptor to be processed by SW */
4577 static inline struct mvpp2_rx_desc *
4578 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4580 int rx_desc = rxq->next_desc_to_proc;
4582 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4583 prefetch(rxq->descs + rxq->next_desc_to_proc);
4584 return rxq->descs + rx_desc;
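/*
 * Editor's note: MVPP2_QUEUE_NEXT_DESC() is assumed to be the usual
 * wrap-around helper, roughly (index < q->last_desc) ? index + 1 : 0.
 * With a 256-entry ring (last_desc == 255), index 255 wraps to 0, so
 * the prefetch above always touches a valid descriptor.
 */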
4587 /* Set rx queue offset */
4588 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4589 int prxq, int offset)
4593 /* Convert offset from bytes to units of 32 bytes */
4594 offset = offset >> 5;
4596 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4597 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4600 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4601 MVPP2_RXQ_PACKET_OFFSET_MASK);
4603 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
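/*
 * Editor's worked example (assumption: NET_SKB_PAD == 64 on this
 * platform, as used by mvpp2_rxq_init() further below): a 64-byte
 * offset becomes 64 >> 5 == 2, which lands in bits 28..30 of the RXQ
 * config register, i.e. val |= 2 << MVPP2_RXQ_PACKET_OFFSET_OFFS.
 */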
4606 /* Tx descriptors helper methods */
4608 /* Get pointer to next Tx descriptor to be processed (send) by HW */
4609 static struct mvpp2_tx_desc *
4610 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4612 int tx_desc = txq->next_desc_to_proc;
4614 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4615 return txq->descs + tx_desc;
4618 /* Update HW with number of aggregated Tx descriptors to be sent
4620 * Called only from mvpp2_tx(), so migration is disabled, using
4621 * smp_processor_id() is OK.
4623 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4625 /* aggregated access - relevant TXQ number is written in TX desc */
4626 mvpp2_percpu_write(port->priv, smp_processor_id(),
4627 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4631 /* Check if there are enough free descriptors in aggregated txq.
4632 * If not, update the number of occupied descriptors and repeat the check.
4634 * Called only from mvpp2_tx(), so migration is disabled, using
4635 * smp_processor_id() is OK.
4637 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4638 struct mvpp2_tx_queue *aggr_txq, int num)
4640 if ((aggr_txq->count + num) > aggr_txq->size) {
4641 /* Update number of occupied aggregated Tx descriptors */
4642 int cpu = smp_processor_id();
4643 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4645 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4648 if ((aggr_txq->count + num) > aggr_txq->size)
4654 /* Reserved Tx descriptors allocation request
4656 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
4657 * only by mvpp2_tx(), so migration is disabled, using
4658 * smp_processor_id() is OK.
4660 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4661 struct mvpp2_tx_queue *txq, int num)
4664 int cpu = smp_processor_id();
4666 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4667 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
4669 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
4671 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
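/*
 * Editor's sketch of the two-register handshake above (illustrative
 * caller, not in the original source); the HW may grant fewer
 * descriptors than requested:
 */
#if 0
	int granted = mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	txq_pcpu->reserved_num += granted;	/* may be < req */
#endif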
4674 /* Check if there are enough reserved descriptors for transmission.
4675 * If not, request chunk of reserved descriptors and check again.
4677 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4678 struct mvpp2_tx_queue *txq,
4679 struct mvpp2_txq_pcpu *txq_pcpu,
4682 int req, cpu, desc_count;
4684 if (txq_pcpu->reserved_num >= num)
4687 /* Not enough descriptors reserved! Update the reserved descriptor
4688 * count and check again.
4692 /* Compute total of used descriptors */
4693 for_each_present_cpu(cpu) {
4694 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4696 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4697 desc_count += txq_pcpu_aux->count;
4698 desc_count += txq_pcpu_aux->reserved_num;
4701 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4705 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4708 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4710 /* OK, the descriptor count has been updated: check again. */
4711 if (txq_pcpu->reserved_num < num)
4716 /* Release the last allocated Tx descriptor. Useful to handle DMA
4717 * mapping failures in the Tx path.
4719 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4721 if (txq->next_desc_to_proc == 0)
4722 txq->next_desc_to_proc = txq->last_desc - 1;
4724 txq->next_desc_to_proc--;
4727 /* Set Tx descriptors fields relevant for CSUM calculation */
4728 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4729 int ip_hdr_len, int l4_proto)
4733 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4734 * G_L4_chk, L4_type required only for checksum calculation
4736 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4737 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4738 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4740 if (l3_proto == swab16(ETH_P_IP)) {
4741 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4742 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4744 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4747 if (l4_proto == IPPROTO_TCP) {
4748 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4749 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4750 } else if (l4_proto == IPPROTO_UDP) {
4751 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4752 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4754 command |= MVPP2_TXD_L4_CSUM_NOT;
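/*
 * Editor's sketch of a caller (cf. mvpp2_skb_tx_csum() further below);
 * the offsets are illustrative. Note that the l3_proto comparison above
 * uses swab16(ETH_P_IP), which is equivalent to htons() on
 * little-endian hosts:
 */
#if 0
	u32 cmd = mvpp2_txq_desc_csum(ETH_HLEN,		/* L3 header offset */
				      htons(ETH_P_IP),
				      ip_hdr(skb)->ihl,	/* IHL, 32-bit words */
				      IPPROTO_TCP);
#endif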
4760 /* Get number of sent descriptors and decrement counter.
4761 * The number of sent descriptors is returned.
4764 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
4765 * (migration disabled) and from the TX completion tasklet (migration
4766 * disabled) so using smp_processor_id() is OK.
4768 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4769 struct mvpp2_tx_queue *txq)
4773 /* Reading status reg resets transmitted descriptor counter */
4774 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
4775 MVPP2_TXQ_SENT_REG(txq->id));
4777 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4778 MVPP2_TRANSMITTED_COUNT_OFFSET;
4781 /* Called through on_each_cpu(), so runs on all CPUs, with migration
4782 * disabled, therefore using smp_processor_id() is OK.
4784 static void mvpp2_txq_sent_counter_clear(void *arg)
4786 struct mvpp2_port *port = arg;
4789 for (queue = 0; queue < port->ntxqs; queue++) {
4790 int id = port->txqs[queue]->id;
4792 mvpp2_percpu_read(port->priv, smp_processor_id(),
4793 MVPP2_TXQ_SENT_REG(id));
4797 /* Set max sizes for Tx queues */
4798 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4801 int txq, tx_port_num;
4803 mtu = port->pkt_size * 8;
4804 if (mtu > MVPP2_TXP_MTU_MAX)
4805 mtu = MVPP2_TXP_MTU_MAX;
4807 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4810 /* Indirect access to registers */
4811 tx_port_num = mvpp2_egress_port(port);
4812 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4815 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4816 val &= ~MVPP2_TXP_MTU_MAX;
4818 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4820 /* TXP token size and all TXQs token size must be larger than the MTU */
4821 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4822 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4825 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4827 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4830 for (txq = 0; txq < port->ntxqs; txq++) {
4831 val = mvpp2_read(port->priv,
4832 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4833 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4837 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4839 mvpp2_write(port->priv,
4840 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4846 /* Set the number of packets that will be received before an Rx
4847  * interrupt is generated by HW.
4849 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4850 struct mvpp2_rx_queue *rxq)
4852 int cpu = get_cpu();
4854 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4855 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
4857 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4858 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4864 /* For some reason in the LSP this is done on each CPU. Why? */
4865 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
4866 struct mvpp2_tx_queue *txq)
4868 int cpu = get_cpu();
4871 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
4872 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
4874 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
4875 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
4876 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
4881 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4883 u64 tmp = (u64)clk_hz * usec;
4885 do_div(tmp, USEC_PER_SEC);
4887 return tmp > U32_MAX ? U32_MAX : tmp;
4890 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4892 u64 tmp = (u64)cycles * USEC_PER_SEC;
4894 do_div(tmp, clk_hz);
4896 return tmp > U32_MAX ? U32_MAX : tmp;
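/*
 * Editor's worked example (assumption: tclk == 250 MHz, a common PPv2
 * core clock): 100 usec maps to 250000000 * 100 / 1000000 == 25000
 * cycles, and 25000 cycles map back to 100 usec; results above U32_MAX
 * are clamped.
 */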
4899 /* Set the time delay in usec before Rx interrupt */
4900 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4901 struct mvpp2_rx_queue *rxq)
4903 unsigned long freq = port->priv->tclk;
4904 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4906 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4908 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4910 /* re-evaluate to get actual register value */
4911 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4914 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4917 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
4919 unsigned long freq = port->priv->tclk;
4920 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
4922 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
4923 port->tx_time_coal =
4924 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
4926 /* re-evaluate to get actual register value */
4927 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
4930 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
4933 /* Free Tx queue skbuffs */
4934 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4935 struct mvpp2_tx_queue *txq,
4936 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4940 for (i = 0; i < num; i++) {
4941 struct mvpp2_txq_pcpu_buf *tx_buf =
4942 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4944 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
4945 tx_buf->size, DMA_TO_DEVICE);
4947 dev_kfree_skb_any(tx_buf->skb);
4949 mvpp2_txq_inc_get(txq_pcpu);
4953 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4956 int queue = fls(cause) - 1;
4958 return port->rxqs[queue];
4961 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4964 int queue = fls(cause) - 1;
4966 return port->txqs[queue];
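/*
 * Editor's worked example: fls() returns the 1-based index of the
 * highest set bit, so for cause == 0b0100, fls(cause) - 1 == 2 and the
 * helpers above service queue 2 first, i.e. the highest-numbered queue
 * with work pending.
 */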
4969 /* Handle end of transmission */
4970 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4971 struct mvpp2_txq_pcpu *txq_pcpu)
4973 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4976 if (txq_pcpu->cpu != smp_processor_id())
4977 netdev_err(port->dev, "wrong cpu at the end of Tx processing\n");
4979 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4982 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4984 txq_pcpu->count -= tx_done;
4986 if (netif_tx_queue_stopped(nq))
4987 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4988 netif_tx_wake_queue(nq);
4991 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
4994 struct mvpp2_tx_queue *txq;
4995 struct mvpp2_txq_pcpu *txq_pcpu;
4996 unsigned int tx_todo = 0;
4999 txq = mvpp2_get_tx_queue(port, cause);
5003 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5005 if (txq_pcpu->count) {
5006 mvpp2_txq_done(port, txq, txq_pcpu);
5007 tx_todo += txq_pcpu->count;
5010 cause &= ~(1 << txq->log_id);
5015 /* Rx/Tx queue initialization/cleanup methods */
5017 /* Allocate and initialize descriptors for aggr TXQ */
5018 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5019 struct mvpp2_tx_queue *aggr_txq,
5020 int desc_num, int cpu,
5025 /* Allocate memory for TX descriptors */
5026 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
5027 desc_num * MVPP2_DESC_ALIGNED_SIZE,
5028 &aggr_txq->descs_dma, GFP_KERNEL);
5029 if (!aggr_txq->descs)
5032 aggr_txq->last_desc = aggr_txq->size - 1;
5034 /* Workaround: the aggregated TXQ has no reset, so resume from the HW index */
5035 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5036 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5038 /* Set Tx descriptors queue starting address indirect
5041 if (priv->hw_version == MVPP21)
5042 txq_dma = aggr_txq->descs_dma;
5044 txq_dma = aggr_txq->descs_dma >>
5045 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5047 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
5048 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
5053 /* Create a specified Rx queue */
5054 static int mvpp2_rxq_init(struct mvpp2_port *port,
5055 struct mvpp2_rx_queue *rxq)
5061 rxq->size = port->rx_ring_size;
5063 /* Allocate memory for RX descriptors */
5064 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5065 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5066 &rxq->descs_dma, GFP_KERNEL);
5070 rxq->last_desc = rxq->size - 1;
5072 /* Zero occupied and non-occupied counters - direct access */
5073 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5075 /* Set Rx descriptors queue starting address - indirect access */
5077 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5078 if (port->priv->hw_version == MVPP21)
5079 rxq_dma = rxq->descs_dma;
5081 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
5082 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
5083 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
5084 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
5088 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
5090 /* Set coalescing pkts and time */
5091 mvpp2_rx_pkts_coal_set(port, rxq);
5092 mvpp2_rx_time_coal_set(port, rxq);
5094 /* Add number of descriptors ready for receiving packets */
5095 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
5100 /* Push packets received by the RXQ to BM pool */
5101 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
5102 struct mvpp2_rx_queue *rxq)
5106 rx_received = mvpp2_rxq_received(port, rxq->id);
5110 for (i = 0; i < rx_received; i++) {
5111 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5112 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5115 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5116 MVPP2_RXD_BM_POOL_ID_OFFS;
5118 mvpp2_bm_pool_put(port, pool,
5119 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
5120 mvpp2_rxdesc_cookie_get(port, rx_desc));
5122 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
5125 /* Cleanup Rx queue */
5126 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5127 struct mvpp2_rx_queue *rxq)
5131 mvpp2_rxq_drop_pkts(port, rxq);
5134 dma_free_coherent(port->dev->dev.parent,
5135 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5141 rxq->next_desc_to_proc = 0;
5144 /* Clear Rx descriptors queue starting address and size;
5145 * free descriptor number
5147 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5149 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5150 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5151 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
5155 /* Create and initialize a Tx queue */
5156 static int mvpp2_txq_init(struct mvpp2_port *port,
5157 struct mvpp2_tx_queue *txq)
5160 int cpu, desc, desc_per_txq, tx_port_num;
5161 struct mvpp2_txq_pcpu *txq_pcpu;
5163 txq->size = port->tx_ring_size;
5165 /* Allocate memory for Tx descriptors */
5166 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
5167 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5168 &txq->descs_dma, GFP_KERNEL);
5172 txq->last_desc = txq->size - 1;
5174 /* Set Tx descriptors queue starting address - indirect access */
5176 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5177 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5179 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5180 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5181 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5182 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5183 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5184 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
5185 val &= ~MVPP2_TXQ_PENDING_MASK;
5186 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
5188 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5189 * for each existing TXQ.
5190 * TCONTS for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT.
5191 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
5194 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5195 (txq->log_id * desc_per_txq);
5197 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5198 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5199 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
5202 /* WRR / EJP configuration - indirect access */
5203 tx_port_num = mvpp2_egress_port(port);
5204 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5206 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5207 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5208 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5209 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5210 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5212 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5213 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5216 for_each_present_cpu(cpu) {
5217 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5218 txq_pcpu->size = txq->size;
5219 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
5220 sizeof(*txq_pcpu->buffs),
5222 if (!txq_pcpu->buffs)
5225 txq_pcpu->count = 0;
5226 txq_pcpu->reserved_num = 0;
5227 txq_pcpu->txq_put_index = 0;
5228 txq_pcpu->txq_get_index = 0;
5233 for_each_present_cpu(cpu) {
5234 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5235 kfree(txq_pcpu->buffs);
5238 dma_free_coherent(port->dev->dev.parent,
5239 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5240 txq->descs, txq->descs_dma);
5245 /* Free allocated TXQ resources */
5246 static void mvpp2_txq_deinit(struct mvpp2_port *port,
5247 struct mvpp2_tx_queue *txq)
5249 struct mvpp2_txq_pcpu *txq_pcpu;
5252 for_each_present_cpu(cpu) {
5253 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5254 kfree(txq_pcpu->buffs);
5258 dma_free_coherent(port->dev->dev.parent,
5259 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5260 txq->descs, txq->descs_dma);
5264 txq->next_desc_to_proc = 0;
5267 /* Set minimum bandwidth for disabled TXQs */
5268 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5270 /* Set Tx descriptors queue starting address and size */
5272 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5273 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5274 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
5278 /* Drain a Tx queue: wait for pending packets and release all buffers */
5279 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5281 struct mvpp2_txq_pcpu *txq_pcpu;
5282 int delay, pending, cpu;
5286 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5287 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
5288 val |= MVPP2_TXQ_DRAIN_EN_MASK;
5289 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5291 /* The napi queue has been stopped, so wait for all packets
5292 * to be transmitted.
5296 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5297 netdev_warn(port->dev,
5298 "port %d: cleaning queue %d timed out\n",
5299 port->id, txq->log_id);
5305 pending = mvpp2_percpu_read(port->priv, cpu,
5306 MVPP2_TXQ_PENDING_REG);
5307 pending &= MVPP2_TXQ_PENDING_MASK;
5310 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
5311 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5314 for_each_present_cpu(cpu) {
5315 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5317 /* Release all packets */
5318 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5321 txq_pcpu->count = 0;
5322 txq_pcpu->txq_put_index = 0;
5323 txq_pcpu->txq_get_index = 0;
5327 /* Cleanup all Tx queues */
5328 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5330 struct mvpp2_tx_queue *txq;
5334 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5336 /* Reset Tx ports and delete Tx queues */
5337 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5338 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5340 for (queue = 0; queue < port->ntxqs; queue++) {
5341 txq = port->txqs[queue];
5342 mvpp2_txq_clean(port, txq);
5343 mvpp2_txq_deinit(port, txq);
5346 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5348 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5349 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5352 /* Cleanup all Rx queues */
5353 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5357 for (queue = 0; queue < port->nrxqs; queue++)
5358 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5361 /* Init all Rx queues for port */
5362 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5366 for (queue = 0; queue < port->nrxqs; queue++) {
5367 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5374 mvpp2_cleanup_rxqs(port);
5378 /* Init all tx queues for port */
5379 static int mvpp2_setup_txqs(struct mvpp2_port *port)
5381 struct mvpp2_tx_queue *txq;
5384 for (queue = 0; queue < port->ntxqs; queue++) {
5385 txq = port->txqs[queue];
5386 err = mvpp2_txq_init(port, txq);
5391 if (port->has_tx_irqs) {
5392 mvpp2_tx_time_coal_set(port);
5393 for (queue = 0; queue < port->ntxqs; queue++) {
5394 txq = port->txqs[queue];
5395 mvpp2_tx_pkts_coal_set(port, txq);
5399 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5403 mvpp2_cleanup_txqs(port);
5407 /* The callback for per-port interrupt */
5408 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5410 struct mvpp2_queue_vector *qv = dev_id;
5412 mvpp2_qvec_interrupt_disable(qv);
5414 napi_schedule(&qv->napi);
5420 static void mvpp2_link_event(struct net_device *dev)
5422 struct mvpp2_port *port = netdev_priv(dev);
5423 struct phy_device *phydev = dev->phydev;
5424 int status_change = 0;
5428 if ((port->speed != phydev->speed) ||
5429 (port->duplex != phydev->duplex)) {
5432 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5433 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5434 MVPP2_GMAC_CONFIG_GMII_SPEED |
5435 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5436 MVPP2_GMAC_AN_SPEED_EN |
5437 MVPP2_GMAC_AN_DUPLEX_EN);
5440 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5442 if (phydev->speed == SPEED_1000)
5443 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5444 else if (phydev->speed == SPEED_100)
5445 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5447 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5449 port->duplex = phydev->duplex;
5450 port->speed = phydev->speed;
5454 if (phydev->link != port->link) {
5455 if (!phydev->link) {
5460 port->link = phydev->link;
5464 if (status_change) {
5466 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5467 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5468 MVPP2_GMAC_FORCE_LINK_DOWN);
5469 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5470 mvpp2_egress_enable(port);
5471 mvpp2_ingress_enable(port);
5473 mvpp2_ingress_disable(port);
5474 mvpp2_egress_disable(port);
5476 phy_print_status(phydev);
5480 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5484 if (!port_pcpu->timer_scheduled) {
5485 port_pcpu->timer_scheduled = true;
5486 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
5487 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5488 HRTIMER_MODE_REL_PINNED);
5492 static void mvpp2_tx_proc_cb(unsigned long data)
5494 struct net_device *dev = (struct net_device *)data;
5495 struct mvpp2_port *port = netdev_priv(dev);
5496 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5497 unsigned int tx_todo, cause;
5499 if (!netif_running(dev))
5501 port_pcpu->timer_scheduled = false;
5503 /* Process all the Tx queues */
5504 cause = (1 << port->ntxqs) - 1;
5505 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
5507 /* Set the timer in case not all the packets were processed */
5509 mvpp2_timer_set(port_pcpu);
5512 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5514 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5515 struct mvpp2_port_pcpu,
5518 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5520 return HRTIMER_NORESTART;
5523 /* Main RX/TX processing routines */
5525 /* Display more error info */
5526 static void mvpp2_rx_error(struct mvpp2_port *port,
5527 struct mvpp2_rx_desc *rx_desc)
5529 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5530 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
5532 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5533 case MVPP2_RXD_ERR_CRC:
5534 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5537 case MVPP2_RXD_ERR_OVERRUN:
5538 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5541 case MVPP2_RXD_ERR_RESOURCE:
5542 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5548 /* Handle RX checksum offload */
5549 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5550 struct sk_buff *skb)
5552 if (((status & MVPP2_RXD_L3_IP4) &&
5553 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5554 (status & MVPP2_RXD_L3_IP6))
5555 if (((status & MVPP2_RXD_L4_UDP) ||
5556 (status & MVPP2_RXD_L4_TCP)) &&
5557 (status & MVPP2_RXD_L4_CSUM_OK)) {
5559 skb->ip_summed = CHECKSUM_UNNECESSARY;
5563 skb->ip_summed = CHECKSUM_NONE;
5566 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5567 static int mvpp2_rx_refill(struct mvpp2_port *port,
5568 struct mvpp2_bm_pool *bm_pool, int pool)
5570 dma_addr_t dma_addr;
5571 phys_addr_t phys_addr;
5574 /* No recycle or too many buffers are in use, so allocate a new skb */
5575 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5580 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
5585 /* Handle tx checksum */
5586 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5588 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5592 if (skb->protocol == htons(ETH_P_IP)) {
5593 struct iphdr *ip4h = ip_hdr(skb);
5595 /* Calculate IPv4 checksum and L4 checksum */
5596 ip_hdr_len = ip4h->ihl;
5597 l4_proto = ip4h->protocol;
5598 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5599 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5601 /* Read l4_protocol from one of IPv6 extra headers */
5602 if (skb_network_header_len(skb) > 0)
5603 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5604 l4_proto = ip6h->nexthdr;
5606 return MVPP2_TXD_L4_CSUM_NOT;
5609 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5610 skb->protocol, ip_hdr_len, l4_proto);
5613 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5616 /* Main rx processing */
5617 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
5618 int rx_todo, struct mvpp2_rx_queue *rxq)
5620 struct net_device *dev = port->dev;
5626 /* Get number of received packets and clamp the to-do */
5627 rx_received = mvpp2_rxq_received(port, rxq->id);
5628 if (rx_todo > rx_received)
5629 rx_todo = rx_received;
5631 while (rx_done < rx_todo) {
5632 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5633 struct mvpp2_bm_pool *bm_pool;
5634 struct sk_buff *skb;
5635 unsigned int frag_size;
5636 dma_addr_t dma_addr;
5637 phys_addr_t phys_addr;
5639 int pool, rx_bytes, err;
5643 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5644 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5645 rx_bytes -= MVPP2_MH_SIZE;
5646 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5647 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5648 data = (void *)phys_to_virt(phys_addr);
5650 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5651 MVPP2_RXD_BM_POOL_ID_OFFS;
5652 bm_pool = &port->priv->bm_pools[pool];
5654 /* In case of an error, release the requested buffer pointer
5655 * to the Buffer Manager. This request process is controlled
5656 * by the hardware, and the information about the buffer is
5657 * carried in the RX descriptor.
5659 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5661 dev->stats.rx_errors++;
5662 mvpp2_rx_error(port, rx_desc);
5663 /* Return the buffer to the pool */
5664 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
5668 if (bm_pool->frag_size > PAGE_SIZE)
5671 frag_size = bm_pool->frag_size;
5673 skb = build_skb(data, frag_size);
5675 netdev_warn(port->dev, "skb build failed\n");
5676 goto err_drop_frame;
5679 err = mvpp2_rx_refill(port, bm_pool, pool);
5681 netdev_err(port->dev, "failed to refill BM pools\n");
5682 goto err_drop_frame;
5685 dma_unmap_single(dev->dev.parent, dma_addr,
5686 bm_pool->buf_size, DMA_FROM_DEVICE);
5689 rcvd_bytes += rx_bytes;
5691 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
5692 skb_put(skb, rx_bytes);
5693 skb->protocol = eth_type_trans(skb, dev);
5694 mvpp2_rx_csum(port, rx_status, skb);
5696 napi_gro_receive(napi, skb);
5700 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5702 u64_stats_update_begin(&stats->syncp);
5703 stats->rx_packets += rcvd_pkts;
5704 stats->rx_bytes += rcvd_bytes;
5705 u64_stats_update_end(&stats->syncp);
5708 /* Update Rx queue management counters */
5710 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5716 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5717 struct mvpp2_tx_desc *desc)
5719 dma_addr_t buf_dma_addr =
5720 mvpp2_txdesc_dma_addr_get(port, desc);
5722 mvpp2_txdesc_size_get(port, desc);
5723 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5724 buf_sz, DMA_TO_DEVICE);
5725 mvpp2_txq_desc_put(txq);
5728 /* Handle tx fragmentation processing */
5729 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5730 struct mvpp2_tx_queue *aggr_txq,
5731 struct mvpp2_tx_queue *txq)
5733 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5734 struct mvpp2_tx_desc *tx_desc;
5736 dma_addr_t buf_dma_addr;
5738 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5739 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5740 void *addr = page_address(frag->page.p) + frag->page_offset;
5742 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5743 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5744 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
5746 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
5749 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
5750 mvpp2_txq_desc_put(txq);
5754 mvpp2_txdesc_offset_set(port, tx_desc,
5755 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5756 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5757 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5759 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5760 /* Last descriptor */
5761 mvpp2_txdesc_cmd_set(port, tx_desc,
5763 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5765 /* Descriptor in the middle: Not First, Not Last */
5766 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5767 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5773 /* Release all descriptors that were used to map fragments of
5774 * this packet, as well as the corresponding DMA mappings
5776 for (i = i - 1; i >= 0; i--) {
5777 tx_desc = txq->descs + i;
5778 tx_desc_unmap_put(port, txq, tx_desc);
5784 /* Main tx processing */
5785 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5787 struct mvpp2_port *port = netdev_priv(dev);
5788 struct mvpp2_tx_queue *txq, *aggr_txq;
5789 struct mvpp2_txq_pcpu *txq_pcpu;
5790 struct mvpp2_tx_desc *tx_desc;
5791 dma_addr_t buf_dma_addr;
5796 txq_id = skb_get_queue_mapping(skb);
5797 txq = port->txqs[txq_id];
5798 txq_pcpu = this_cpu_ptr(txq->pcpu);
5799 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5801 frags = skb_shinfo(skb)->nr_frags + 1;
5803 /* Check number of available descriptors */
5804 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5805 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5811 /* Get a descriptor for the first part of the packet */
5812 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5813 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5814 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
5816 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
5817 skb_headlen(skb), DMA_TO_DEVICE);
5818 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
5819 mvpp2_txq_desc_put(txq);
5824 mvpp2_txdesc_offset_set(port, tx_desc,
5825 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5826 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5827 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5829 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5832 /* First and Last descriptor */
5833 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5834 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5835 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5837 /* First but not Last */
5838 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5839 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5840 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5842 /* Continue with other skb fragments */
5843 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5844 tx_desc_unmap_put(port, txq, tx_desc);
5850 txq_pcpu->reserved_num -= frags;
5851 txq_pcpu->count += frags;
5852 aggr_txq->count += frags;
5854 /* Enable transmit */
5856 mvpp2_aggr_txq_pend_desc_add(port, frags);
5858 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5859 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5861 netif_tx_stop_queue(nq);
5865 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5867 u64_stats_update_begin(&stats->syncp);
5868 stats->tx_packets++;
5869 stats->tx_bytes += skb->len;
5870 u64_stats_update_end(&stats->syncp);
5872 dev->stats.tx_dropped++;
5873 dev_kfree_skb_any(skb);
5876 /* Finalize TX processing */
5877 if (txq_pcpu->count >= txq->done_pkts_coal)
5878 mvpp2_txq_done(port, txq, txq_pcpu);
5880 /* Set the timer in case not all frags were processed */
5881 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
5882 txq_pcpu->count > 0) {
5883 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5885 mvpp2_timer_set(port_pcpu);
5888 return NETDEV_TX_OK;
5891 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5893 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5894 netdev_err(dev, "FCS error\n");
5895 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5896 netdev_err(dev, "rx fifo overrun error\n");
5897 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5898 netdev_err(dev, "tx fifo underrun error\n");
5901 static int mvpp2_poll(struct napi_struct *napi, int budget)
5903 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
5905 struct mvpp2_port *port = netdev_priv(napi->dev);
5906 struct mvpp2_queue_vector *qv;
5907 int cpu = smp_processor_id();
5909 qv = container_of(napi, struct mvpp2_queue_vector, napi);
5911 /* Rx/Tx cause register
5913 * Bits 0-15: each bit indicates received packets on the Rx queue
5914 * (bit 0 is for Rx queue 0).
5916 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5917 * (bit 16 is for Tx queue 0).
5919 * Each CPU has its own Rx/Tx cause register
5921 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
5922 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5924 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5926 mvpp2_cause_error(port->dev, cause_misc);
5928 /* Clear the cause register */
5929 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5930 mvpp2_percpu_write(port->priv, cpu,
5931 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5932 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5935 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5937 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
5938 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
5941 /* Process RX packets */
5942 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5943 cause_rx <<= qv->first_rxq;
5944 cause_rx |= qv->pending_cause_rx;
5945 while (cause_rx && budget > 0) {
5947 struct mvpp2_rx_queue *rxq;
5949 rxq = mvpp2_get_rx_queue(port, cause_rx);
5953 count = mvpp2_rx(port, napi, budget, rxq);
5957 /* Clear the bit associated to this Rx queue
5958 * so that next iteration will continue from
5959 * the next Rx queue.
5961 cause_rx &= ~(1 << rxq->logic_rxq);
5967 napi_complete_done(napi, rx_done);
5969 mvpp2_qvec_interrupt_enable(qv);
5971 qv->pending_cause_rx = cause_rx;
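/*
 * Editor's worked example for the Rx bookkeeping above: with
 * qv->first_rxq == 4 and a HW cause of 0b11 (this vector's queues 4 and
 * 5), cause_rx becomes 0b110000 after the shift. fls() picks queue 5
 * first; once it is fully processed, ~(1 << rxq->logic_rxq) clears bit 5
 * and queue 4 is handled on the next loop iteration, or remembered in
 * pending_cause_rx if the budget runs out.
 */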
5975 /* Set hw internals when starting port */
5976 static void mvpp2_start_dev(struct mvpp2_port *port)
5978 struct net_device *ndev = port->dev;
5981 mvpp2_gmac_max_rx_size_set(port);
5982 mvpp2_txp_max_tx_size_set(port);
5984 for (i = 0; i < port->nqvecs; i++)
5985 napi_enable(&port->qvecs[i].napi);
5987 /* Enable interrupts on all CPUs */
5988 mvpp2_interrupts_enable(port);
5990 mvpp2_port_enable(port);
5991 phy_start(ndev->phydev);
5992 netif_tx_start_all_queues(port->dev);
5995 /* Set hw internals when stopping port */
5996 static void mvpp2_stop_dev(struct mvpp2_port *port)
5998 struct net_device *ndev = port->dev;
6001 /* Stop new packets from arriving to RXQs */
6002 mvpp2_ingress_disable(port);
6006 /* Disable interrupts on all CPUs */
6007 mvpp2_interrupts_disable(port);
6009 for (i = 0; i < port->nqvecs; i++)
6010 napi_disable(&port->qvecs[i].napi);
6012 netif_carrier_off(port->dev);
6013 netif_tx_stop_all_queues(port->dev);
6015 mvpp2_egress_disable(port);
6016 mvpp2_port_disable(port);
6017 phy_stop(ndev->phydev);
6020 static int mvpp2_check_ringparam_valid(struct net_device *dev,
6021 struct ethtool_ringparam *ring)
6023 u16 new_rx_pending = ring->rx_pending;
6024 u16 new_tx_pending = ring->tx_pending;
6026 if (ring->rx_pending == 0 || ring->tx_pending == 0)
6029 if (ring->rx_pending > MVPP2_MAX_RXD)
6030 new_rx_pending = MVPP2_MAX_RXD;
6031 else if (!IS_ALIGNED(ring->rx_pending, 16))
6032 new_rx_pending = ALIGN(ring->rx_pending, 16);
6034 if (ring->tx_pending > MVPP2_MAX_TXD)
6035 new_tx_pending = MVPP2_MAX_TXD;
6036 else if (!IS_ALIGNED(ring->tx_pending, 32))
6037 new_tx_pending = ALIGN(ring->tx_pending, 32);
6039 if (ring->rx_pending != new_rx_pending) {
6040 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
6041 ring->rx_pending, new_rx_pending);
6042 ring->rx_pending = new_rx_pending;
6045 if (ring->tx_pending != new_tx_pending) {
6046 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
6047 ring->tx_pending, new_tx_pending);
6048 ring->tx_pending = new_tx_pending;
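/*
 * Editor's worked example: a requested rx_pending of 1000 is not a
 * multiple of 16, so it is rounded up to ALIGN(1000, 16) == 1008; a
 * tx_pending of 100 becomes ALIGN(100, 32) == 128. Values above
 * MVPP2_MAX_RXD/MVPP2_MAX_TXD are clamped instead of rounded.
 */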
6054 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
6056 u32 mac_addr_l, mac_addr_m, mac_addr_h;
6058 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
6059 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
6060 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
6061 addr[0] = (mac_addr_h >> 24) & 0xFF;
6062 addr[1] = (mac_addr_h >> 16) & 0xFF;
6063 addr[2] = (mac_addr_h >> 8) & 0xFF;
6064 addr[3] = mac_addr_h & 0xFF;
6065 addr[4] = mac_addr_m & 0xFF;
6066 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
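/*
 * Editor's worked example (hypothetical register contents): with
 * mac_addr_h == 0x00505433, the shifts above yield addr[0..3] =
 * 00:50:54:33; addr[4] is the low byte of the "middle" register and
 * addr[5] comes from the GMAC_CTRL_1 SA-low field.
 */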
6069 static int mvpp2_phy_connect(struct mvpp2_port *port)
6071 struct phy_device *phy_dev;
6073 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
6074 port->phy_interface);
6076 netdev_err(port->dev, "cannot connect to phy\n");
6079 phy_dev->supported &= PHY_GBIT_FEATURES;
6080 phy_dev->advertising = phy_dev->supported;
6089 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
6091 struct net_device *ndev = port->dev;
6093 phy_disconnect(ndev->phydev);
6096 static int mvpp2_irqs_init(struct mvpp2_port *port)
6100 for (i = 0; i < port->nqvecs; i++) {
6101 struct mvpp2_queue_vector *qv = port->qvecs + i;
6103 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
6107 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
6108 irq_set_affinity_hint(qv->irq,
6109 cpumask_of(qv->sw_thread_id));
6114 for (i = 0; i < port->nqvecs; i++) {
6115 struct mvpp2_queue_vector *qv = port->qvecs + i;
6117 irq_set_affinity_hint(qv->irq, NULL);
6118 free_irq(qv->irq, qv);
6124 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
6128 for (i = 0; i < port->nqvecs; i++) {
6129 struct mvpp2_queue_vector *qv = port->qvecs + i;
6131 irq_set_affinity_hint(qv->irq, NULL);
6132 free_irq(qv->irq, qv);
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* The link is down by default */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}

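/* Usage sketch (illustrative, device name hypothetical): this handler
 * runs for e.g.
 *
 *	ip link set dev eth0 address 02:00:00:00:00:01
 *
 * If the interface is up, the port is stopped while the parser entry
 * is updated and restarted afterwards; on failure the original address
 * is restored so the parser state stays consistent.
 */
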
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}

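/* Usage sketch (illustrative, device name hypothetical):
 *
 *	ip link set dev eth0 mtu 9000
 *
 * If MVPP2_RX_PKT_SIZE(mtu) is not a multiple of 8, the request is
 * first rounded as logged above, and the BM pools are resized to fit
 * the new packet size.
 */
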
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

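/* Note on the loop above (explanatory, not from the original source):
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() implement a
 * seqcount-style reader. If a CPU updates its counters while we copy
 * them, the retry check fails and the snapshot is re-read, so the
 * 64-bit values stay consistent even on 32-bit systems.
 */
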
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}

/* Ethtool methods */

/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}

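/* Usage sketch (illustrative, device name hypothetical):
 *
 *	ethtool -C eth0 rx-usecs 32 rx-frames 32 tx-frames 16
 *
 * rx-usecs/rx-frames program every Rx queue above; tx-usecs only takes
 * effect when the port has TX IRQs (port->has_tx_irqs), otherwise Tx
 * completion relies on the hrtimer-based tx_done path.
 */
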
/* Get coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;

	return 0;
}

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}

static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters\n");
	return err;
}

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}

static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		v->irq = of_irq_get_byname(port_node, irqname);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}

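/* Layout sketch (illustrative) for a 4-CPU system:
 *
 * - MVPP2_QDIST_MULTI_MODE: 4 private vectors ("tx-cpu0".."tx-cpu3"),
 *   vector i owning Rx queues [i * MVPP2_DEFAULT_RXQ,
 *   (i + 1) * MVPP2_DEFAULT_RXQ).
 * - MVPP2_QDIST_SINGLE_MODE: the same 4 private vectors with no Rx
 *   queues (Tx-done only), plus a 5th shared "rx-shared" vector that
 *   owns all port->nrxqs Rx queues.
 */
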
static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}

/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}

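/* Worked example (illustrative): on PPv2.2, a vector with
 * sw_thread_id = 2, first_rxq = 8 and nrxqs = 4 on port 1 first selects
 * the group by writing
 * 2 | (1 << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET) to the index
 * register, then describes it by writing
 * 8 | (4 << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET) to the sub-group
 * config register.
 */
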
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
	    (port->ntxqs > MVPP2_MAX_TXQ))
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}

/* Checks if the port DT description has the TX interrupts
 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
 * they are available, but we need to keep support for old DTs.
 */
static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
				   struct device_node *port_node)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
			  "tx-cpu2", "tx-cpu3" };
	int ret, i;

	if (priv->hw_version == MVPP21)
		return false;

	for (i = 0; i < 5; i++) {
		ret = of_property_match_string(port_node, "interrupt-names",
					       irqs[i]);
		if (ret < 0)
			return false;
	}

	return true;
}

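/* Illustrative DT fragment (not from the original source) that this
 * helper reports as having per-CPU TX interrupts:
 *
 *	interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
 *			  "tx-cpu2", "tx-cpu3";
 */
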
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	/* Zero-initialized so the PPv2.2 path, which cannot read a MAC
	 * address from hardware, falls through to the "random" case
	 * below instead of testing uninitialized stack bytes.
	 */
	char hw_mac_addr[ETH_ALEN] = { 0 };
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_deinit_qvecs;
		}
	} else {
		if (of_property_read_u32(port_node, "gop-port-id",
					 &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_deinit_qvecs;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		if (priv->hw_version == MVPP21)
			mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

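/* Worked example (illustrative): with dram->num_cs = 2, windows 0 and 1
 * are programmed from the two chip-selects and win_enable ends up as
 * 0x3, so only those two decoding windows are enabled in
 * MVPP2_BASE_ADDR_ENABLE.
 */
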
/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx FIFO init */
	mvpp2_rx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int port_count, i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(*priv->port_list),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}

	/* Initialize ports */
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv);
		if (err < 0)
			goto err_mg_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");